2024-12-10 15:35:43,505 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-10 15:35:43,548 main DEBUG Took 0.039446 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-10 15:35:43,549 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-10 15:35:43,550 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-10 15:35:43,551 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-10 15:35:43,553 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 15:35:43,579 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-10 15:35:43,611 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 15:35:43,614 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 15:35:43,620 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 15:35:43,621 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 15:35:43,621 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 15:35:43,624 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 15:35:43,625 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 15:35:43,626 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 15:35:43,627 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 15:35:43,627 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 15:35:43,629 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 15:35:43,629 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 15:35:43,630 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 15:35:43,630 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-10 15:35:43,631 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 15:35:43,631 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 15:35:43,632 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 15:35:43,633 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 15:35:43,633 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 15:35:43,634 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 15:35:43,634 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 15:35:43,635 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 15:35:43,635 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 15:35:43,636 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-10 15:35:43,636 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 15:35:43,637 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-10 15:35:43,639 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-10 15:35:43,641 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-10 15:35:43,643 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-10 15:35:43,644 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-10 15:35:43,646 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-10 15:35:43,647 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-10 15:35:43,658 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-10 15:35:43,662 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-10 15:35:43,664 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-10 15:35:43,665 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-10 15:35:43,665 main DEBUG createAppenders(={Console}) 2024-12-10 15:35:43,667 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-12-10 15:35:43,667 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-10 15:35:43,667 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-12-10 15:35:43,668 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-10 15:35:43,669 main DEBUG OutputStream closed 2024-12-10 15:35:43,669 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-10 15:35:43,669 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-10 15:35:43,670 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-12-10 15:35:43,850 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-10 15:35:43,857 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-10 15:35:43,858 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-10 15:35:43,859 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-10 15:35:43,861 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-10 15:35:43,862 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-10 15:35:43,862 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-10 15:35:43,863 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-10 15:35:43,863 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-10 15:35:43,864 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-10 15:35:43,864 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-10 15:35:43,864 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-10 15:35:43,864 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-10 15:35:43,865 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-10 15:35:43,865 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-10 15:35:43,865 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-10 15:35:43,866 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-10 15:35:43,867 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-10 15:35:43,871 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-10 15:35:43,871 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-12-10 15:35:43,872 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-10 15:35:43,872 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-12-10T15:35:44,303 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0 2024-12-10 15:35:44,307 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-10 15:35:44,308 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
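The block above records Log4j2 parsing the test jar's log4j2.properties: per-package logger levels plus a Console appender on SYSTEM_ERR using the %d{ISO8601} pattern. For reproducing the run locally with the same verbosity, the following is a minimal sketch that applies those levels through Log4j2's Configurator API; it is an illustration of the levels shown being built, not the configuration file the test actually ships.

```java
// Hedged sketch: programmatic equivalents of the per-package levels the
// PropertiesConfiguration above reports building. This is NOT the test jar's
// actual log4j2.properties, just the same levels applied via the Log4j2 API.
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

public final class TestLogLevels {
    public static void apply() {
        Configurator.setRootLevel(Level.INFO);                 // rootLogger = INFO,Console
        Configurator.setLevel("org.apache.hadoop", Level.WARN);
        Configurator.setLevel("org.apache.hadoop.hbase", Level.DEBUG);
        Configurator.setLevel("org.apache.zookeeper", Level.ERROR);
        Configurator.setLevel("org.apache.hadoop.metrics2.util.MBeans", Level.ERROR);
        Configurator.setLevel("org.apache.hadoop.metrics2.impl.MetricsConfig", Level.WARN);
        Configurator.setLevel("org.apache.hadoop.metrics2.impl.MetricsSystemImpl", Level.ERROR);
        Configurator.setLevel("org.apache.hadoop.hbase.ipc.FailedServers", Level.DEBUG);
        Configurator.setLevel("org.apache.hadoop.hbase.regionserver.RSRpcServices", Level.DEBUG);
        Configurator.setLevel("org.apache.hbase.thirdparty.io.netty.channel", Level.DEBUG);
    }

    private TestLogLevels() {}
}
```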
2024-12-10T15:35:44,323 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins 2024-12-10T15:35:44,368 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-10T15:35:44,372 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/cluster_17d56c0b-d68d-6c1b-ce5b-c524a0c95074, deleteOnExit=true 2024-12-10T15:35:44,373 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-10T15:35:44,374 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/test.cache.data in system properties and HBase conf 2024-12-10T15:35:44,375 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/hadoop.tmp.dir in system properties and HBase conf 2024-12-10T15:35:44,376 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/hadoop.log.dir in system properties and HBase conf 2024-12-10T15:35:44,377 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-10T15:35:44,378 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-10T15:35:44,378 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-10T15:35:44,603 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-10T15:35:44,811 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-10T15:35:44,820 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-10T15:35:44,821 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-10T15:35:44,823 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-10T15:35:44,824 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-10T15:35:44,826 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-10T15:35:44,831 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-10T15:35:44,837 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-10T15:35:44,840 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-10T15:35:44,843 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-10T15:35:44,844 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/nfs.dump.dir in system properties and HBase conf 2024-12-10T15:35:44,844 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/java.io.tmpdir in system properties and HBase conf 2024-12-10T15:35:44,844 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-10T15:35:44,845 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-10T15:35:44,845 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-10T15:35:46,758 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-10T15:35:46,927 INFO [Time-limited test {}] log.Log(170): Logging initialized @4470ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-10T15:35:47,053 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T15:35:47,188 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T15:35:47,290 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T15:35:47,291 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T15:35:47,300 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-10T15:35:47,376 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T15:35:47,388 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/hadoop.log.dir/,AVAILABLE} 2024-12-10T15:35:47,389 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T15:35:47,801 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/java.io.tmpdir/jetty-localhost-40889-hadoop-hdfs-3_4_1-tests_jar-_-any-3890047289336618410/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-10T15:35:47,812 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:40889} 2024-12-10T15:35:47,813 INFO [Time-limited test {}] server.Server(415): Started @5357ms 2024-12-10T15:35:48,573 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-10T15:35:48,585 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-10T15:35:48,589 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-10T15:35:48,590 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-10T15:35:48,590 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-10T15:35:48,591 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/hadoop.log.dir/,AVAILABLE} 2024-12-10T15:35:48,596 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-10T15:35:48,753 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f79ec76{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/java.io.tmpdir/jetty-localhost-39589-hadoop-hdfs-3_4_1-tests_jar-_-any-4340399949480494214/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-10T15:35:48,755 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:39589} 2024-12-10T15:35:48,756 INFO [Time-limited test {}] server.Server(415): Started @6300ms 2024-12-10T15:35:48,825 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-10T15:35:50,049 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/cluster_17d56c0b-d68d-6c1b-ce5b-c524a0c95074/dfs/data/data2/current/BP-2139375084-172.17.0.2-1733844946170/current, will proceed with Du for space computation calculation, 2024-12-10T15:35:50,049 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/cluster_17d56c0b-d68d-6c1b-ce5b-c524a0c95074/dfs/data/data1/current/BP-2139375084-172.17.0.2-1733844946170/current, will proceed with Du for space computation calculation, 2024-12-10T15:35:50,119 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-10T15:35:50,217 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3cbe7d8b41cbf4a1 with lease ID 0xd423d5b1187161e9: Processing first storage report for DS-0cfdb285-2980-4de8-a60b-2fefc415198e from datanode DatanodeRegistration(127.0.0.1:46053, datanodeUuid=7b36a362-3475-4661-8cbb-c0345cf63144, infoPort=36033, infoSecurePort=0, ipcPort=33135, storageInfo=lv=-57;cid=testClusterID;nsid=1109610272;c=1733844946170) 2024-12-10T15:35:50,218 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3cbe7d8b41cbf4a1 with lease ID 0xd423d5b1187161e9: from storage DS-0cfdb285-2980-4de8-a60b-2fefc415198e node DatanodeRegistration(127.0.0.1:46053, datanodeUuid=7b36a362-3475-4661-8cbb-c0345cf63144, infoPort=36033, infoSecurePort=0, ipcPort=33135, storageInfo=lv=-57;cid=testClusterID;nsid=1109610272;c=1733844946170), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-10T15:35:50,218 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3cbe7d8b41cbf4a1 with lease ID 0xd423d5b1187161e9: Processing first storage report for DS-4a0e37c7-232a-49e0-ada5-17b2a3f1e43c from datanode DatanodeRegistration(127.0.0.1:46053, datanodeUuid=7b36a362-3475-4661-8cbb-c0345cf63144, infoPort=36033, infoSecurePort=0, ipcPort=33135, storageInfo=lv=-57;cid=testClusterID;nsid=1109610272;c=1733844946170) 2024-12-10T15:35:50,218 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3cbe7d8b41cbf4a1 with lease ID 0xd423d5b1187161e9: from storage DS-4a0e37c7-232a-49e0-ada5-17b2a3f1e43c node DatanodeRegistration(127.0.0.1:46053, datanodeUuid=7b36a362-3475-4661-8cbb-c0345cf63144, infoPort=36033, infoSecurePort=0, ipcPort=33135, storageInfo=lv=-57;cid=testClusterID;nsid=1109610272;c=1733844946170), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-10T15:35:50,249 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0 
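The entries above cover HBaseTestingUtility's "STARTING DFS" phase: a NameNode and a single DataNode come up behind Jetty, and the DataNode's first block report is processed. A minimal sketch of standing up a comparable single-DataNode MiniDFSCluster directly with Hadoop's test harness follows; the base directory and paths are illustrative assumptions, since HBaseTestingUtility manages its own test-data directories.

```java
// Hedged sketch: a single-DataNode MiniDFSCluster comparable to the one the
// log above reports starting (numDataNodes=1). Directory and paths are
// illustrative; HBaseTestingUtility picks its own test-data locations.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public final class MiniDfsExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/minidfs-example"); // hypothetical local dir
        MiniDFSCluster dfs = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        try {
            dfs.waitActive();                                // wait for the DataNode to register
            FileSystem fs = dfs.getFileSystem();             // DistributedFileSystem backed by the mini cluster
            fs.mkdirs(new Path("/user/jenkins/test-data"));  // same kind of root directory the test creates
            System.out.println("NameNode at " + fs.getUri());
        } finally {
            dfs.shutdown();
        }
    }

    private MiniDfsExample() {}
}
```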
2024-12-10T15:35:50,429 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/cluster_17d56c0b-d68d-6c1b-ce5b-c524a0c95074/zookeeper_0, clientPort=56346, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/cluster_17d56c0b-d68d-6c1b-ce5b-c524a0c95074/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/cluster_17d56c0b-d68d-6c1b-ce5b-c524a0c95074/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-10T15:35:50,479 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=56346 2024-12-10T15:35:50,527 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T15:35:50,542 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T15:35:51,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741825_1001 (size=7) 2024-12-10T15:35:51,597 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935 with version=8 2024-12-10T15:35:51,599 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/hbase-staging 2024-12-10T15:35:51,781 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-10T15:35:52,129 INFO [Time-limited test {}] client.ConnectionUtils(129): master/bf0fec90ff6d:0 server-side Connection retries=45 2024-12-10T15:35:52,151 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T15:35:52,152 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T15:35:52,152 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-10T15:35:52,152 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T15:35:52,153 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T15:35:52,329 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-10T15:35:52,423 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-10T15:35:52,437 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-10T15:35:52,442 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T15:35:52,478 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 44382 (auto-detected) 2024-12-10T15:35:52,480 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-10T15:35:52,505 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:33139 2024-12-10T15:35:52,515 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T15:35:52,519 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T15:35:52,534 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:33139 connecting to ZooKeeper ensemble=127.0.0.1:56346 2024-12-10T15:35:52,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:331390x0, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T15:35:52,693 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33139-0x100109579e40000 connected 2024-12-10T15:35:52,856 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-10T15:35:52,860 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T15:35:52,870 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T15:35:52,878 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33139 2024-12-10T15:35:52,879 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33139 2024-12-10T15:35:52,883 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33139 2024-12-10T15:35:52,887 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33139 2024-12-10T15:35:52,893 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33139 
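At this point the master's RPC executors are up and RecoverableZooKeeper has connected to the ensemble at 127.0.0.1:56346, setting watchers on /hbase/master, /hbase/running and /hbase/acl before those znodes exist. The sketch below shows, at the plain ZooKeeper API level and with simplified error handling, how such a watch on a not-yet-existing znode is registered and later fires a NodeCreated event; the quorum address is taken from the log, everything else is illustrative.

```java
// Hedged sketch: what "Set watcher on znode that does not yet exist, /hbase/master"
// amounts to with the plain ZooKeeper client. exists() registers a one-shot watch
// even when the node is absent; a later create fires NodeCreated, as seen above.
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public final class MasterZnodeWatchExample {
    public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:56346", 30_000, (WatchedEvent e) -> {
            if (e.getState() == Watcher.Event.KeeperState.SyncConnected) {
                connected.countDown();                       // mirrors the SyncConnected event in the log
            }
        });
        connected.await();
        zk.exists("/hbase/master", event ->
            System.out.println("event " + event.getType() + " on " + event.getPath()));
        Thread.sleep(5_000);                                 // keep the session alive long enough to observe events
        zk.close();
    }

    private MasterZnodeWatchExample() {}
}
```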
2024-12-10T15:35:52,906 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935, hbase.cluster.distributed=false 2024-12-10T15:35:53,051 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/bf0fec90ff6d:0 server-side Connection retries=45 2024-12-10T15:35:53,051 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T15:35:53,051 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-10T15:35:53,052 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-10T15:35:53,052 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-10T15:35:53,052 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-10T15:35:53,055 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-10T15:35:53,058 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-10T15:35:53,059 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:46239 2024-12-10T15:35:53,062 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-10T15:35:53,083 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-10T15:35:53,085 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T15:35:53,094 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T15:35:53,101 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:46239 connecting to ZooKeeper ensemble=127.0.0.1:56346 2024-12-10T15:35:53,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:462390x0, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-10T15:35:53,125 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46239-0x100109579e40001 connected 2024-12-10T15:35:53,127 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46239-0x100109579e40001, quorum=127.0.0.1:56346, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-10T15:35:53,129 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46239-0x100109579e40001, 
quorum=127.0.0.1:56346, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T15:35:53,132 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46239-0x100109579e40001, quorum=127.0.0.1:56346, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-10T15:35:53,135 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46239 2024-12-10T15:35:53,137 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46239 2024-12-10T15:35:53,138 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46239 2024-12-10T15:35:53,143 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46239 2024-12-10T15:35:53,144 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46239 2024-12-10T15:35:53,164 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/bf0fec90ff6d,33139,1733844951772 2024-12-10T15:35:53,176 DEBUG [M:0;bf0fec90ff6d:33139 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;bf0fec90ff6d:33139 2024-12-10T15:35:53,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x100109579e40001, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T15:35:53,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T15:35:53,183 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/bf0fec90ff6d,33139,1733844951772 2024-12-10T15:35:53,223 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-10T15:35:53,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T15:35:53,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x100109579e40001, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-10T15:35:53,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x100109579e40001, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T15:35:53,232 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-10T15:35:53,235 DEBUG [zk-event-processor-pool-0 
{}] zookeeper.ZKUtil(111): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-10T15:35:53,235 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/bf0fec90ff6d,33139,1733844951772 from backup master directory 2024-12-10T15:35:53,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/bf0fec90ff6d,33139,1733844951772 2024-12-10T15:35:53,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T15:35:53,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x100109579e40001, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-10T15:35:53,251 WARN [master/bf0fec90ff6d:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-10T15:35:53,251 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=bf0fec90ff6d,33139,1733844951772 2024-12-10T15:35:53,254 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-10T15:35:53,255 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-10T15:35:53,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741826_1002 (size=42) 2024-12-10T15:35:53,807 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/hbase.id with ID: 3d40270d-059d-4a44-b19b-ad1358e88ac7 2024-12-10T15:35:53,882 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-10T15:35:53,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T15:35:53,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x100109579e40001, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T15:35:54,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741827_1003 (size=196) 2024-12-10T15:35:54,023 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, 
{NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T15:35:54,025 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-10T15:35:54,043 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:35:54,047 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T15:35:54,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741828_1004 (size=1189) 2024-12-10T15:35:54,101 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/MasterData/data/master/store 2024-12-10T15:35:54,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741829_1005 (size=34) 2024-12-10T15:35:54,578 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
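The master:store descriptor printed above lists four families (info, proc, rs, state) with explicit VERSIONS, bloom filter, IN_MEMORY, block size and data-block-encoding settings. As a reference for reading those attributes, here is a sketch that builds the 'info' and 'proc' families with the equivalent public ColumnFamilyDescriptorBuilder calls; the table name is illustrative, since master:store itself is created internally by the master.

```java
// Hedged sketch: public-API equivalents of the 'info' and 'proc' family
// attributes printed for the master:store descriptor above. Illustrative only.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class StoreDescriptorExample {
    public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("exampleStore")) // illustrative name
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                    // VERSIONS => '3'
                .setInMemory(true)                                    // IN_MEMORY => 'true'
                .setBlocksize(8 * 1024)                               // BLOCKSIZE => '8192 B (8KB)'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
                .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)                                    // VERSIONS => '1'
                .setBloomFilterType(BloomType.ROW)                    // BLOOMFILTER => 'ROW'
                .build())
            .build();
    }

    private StoreDescriptorExample() {}
}
```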
2024-12-10T15:35:54,579 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T15:35:54,580 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-10T15:35:54,580 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T15:35:54,581 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T15:35:54,581 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-10T15:35:54,581 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T15:35:54,581 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T15:35:54,581 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-10T15:35:54,584 WARN [master/bf0fec90ff6d:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/MasterData/data/master/store/.initializing 2024-12-10T15:35:54,584 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/MasterData/WALs/bf0fec90ff6d,33139,1733844951772 2024-12-10T15:35:54,591 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T15:35:54,606 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bf0fec90ff6d%2C33139%2C1733844951772, suffix=, logDir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/MasterData/WALs/bf0fec90ff6d,33139,1733844951772, archiveDir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/MasterData/oldWALs, maxLogs=10 2024-12-10T15:35:54,634 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/MasterData/WALs/bf0fec90ff6d,33139,1733844951772/bf0fec90ff6d%2C33139%2C1733844951772.1733844954612, exclude list is [], retry=0 2024-12-10T15:35:54,653 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46053,DS-0cfdb285-2980-4de8-a60b-2fefc415198e,DISK] 2024-12-10T15:35:54,656 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-12-10T15:35:54,701 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/MasterData/WALs/bf0fec90ff6d,33139,1733844951772/bf0fec90ff6d%2C33139%2C1733844951772.1733844954612 2024-12-10T15:35:54,702 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36033:36033)] 2024-12-10T15:35:54,703 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-10T15:35:54,704 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T15:35:54,708 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T15:35:54,709 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T15:35:54,767 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T15:35:54,812 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-10T15:35:54,818 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:35:54,829 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T15:35:54,829 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T15:35:54,842 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-10T15:35:54,843 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:35:54,844 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T15:35:54,845 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T15:35:54,855 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-10T15:35:54,856 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:35:54,859 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T15:35:54,859 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-10T15:35:54,868 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-10T15:35:54,868 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:35:54,870 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T15:35:54,883 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-10T15:35:54,885 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-10T15:35:54,910 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-10T15:35:54,918 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-10T15:35:54,932 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T15:35:54,940 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66133055, jitterRate=-0.014540687203407288}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-10T15:35:54,946 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-10T15:35:54,947 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-10T15:35:54,992 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a9a85b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:35:55,035 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
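The FlushLargeStoresPolicy line above falls back to "region.getMemStoreFlushHeapSize/# of families (32.0 M)", and the region later opens with flushSizeLowerBound=33554432. A small sketch of that arithmetic, assuming the common 128 MB region flush size and the four column families the store-open lines list (info, proc, rs, state):

    // Sketch of the per-family flush lower bound reported above.
    // 128 MB flush size is an assumed default (hbase.hregion.memstore.flush.size).
    public class FlushLowerBound {
      public static void main(String[] args) {
        long memstoreFlushSize = 128L * 1024 * 1024; // assumed region flush size
        int familyCount = 4;                         // info, proc, rs, state
        long lowerBound = memstoreFlushSize / familyCount;
        System.out.println(lowerBound);              // 33554432 bytes = 32.0 M
      }
    }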
2024-12-10T15:35:55,048 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-10T15:35:55,049 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-10T15:35:55,051 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-10T15:35:55,053 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-12-10T15:35:55,058 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 5 msec 2024-12-10T15:35:55,059 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-10T15:35:55,090 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-10T15:35:55,106 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-10T15:35:55,140 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-10T15:35:55,143 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-10T15:35:55,144 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-10T15:35:55,165 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-10T15:35:55,167 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-10T15:35:55,171 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-10T15:35:55,190 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-10T15:35:55,191 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-10T15:35:55,198 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-10T15:35:55,212 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-10T15:35:55,223 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-10T15:35:55,237 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T15:35:55,237 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T15:35:55,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x100109579e40001, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-10T15:35:55,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x100109579e40001, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T15:35:55,238 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=bf0fec90ff6d,33139,1733844951772, sessionid=0x100109579e40000, setting cluster-up flag (Was=false) 2024-12-10T15:35:55,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T15:35:55,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x100109579e40001, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T15:35:55,290 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-10T15:35:55,292 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bf0fec90ff6d,33139,1733844951772 2024-12-10T15:35:55,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x100109579e40001, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T15:35:55,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T15:35:55,340 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-10T15:35:55,342 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=bf0fec90ff6d,33139,1733844951772 2024-12-10T15:35:55,385 DEBUG [RS:0;bf0fec90ff6d:46239 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;bf0fec90ff6d:46239 2024-12-10T15:35:55,387 INFO 
[RS:0;bf0fec90ff6d:46239 {}] regionserver.HRegionServer(1008): ClusterId : 3d40270d-059d-4a44-b19b-ad1358e88ac7 2024-12-10T15:35:55,390 DEBUG [RS:0;bf0fec90ff6d:46239 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-10T15:35:55,408 DEBUG [RS:0;bf0fec90ff6d:46239 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-10T15:35:55,409 DEBUG [RS:0;bf0fec90ff6d:46239 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-10T15:35:55,428 DEBUG [RS:0;bf0fec90ff6d:46239 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-10T15:35:55,429 DEBUG [RS:0;bf0fec90ff6d:46239 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@583ac483, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:35:55,431 DEBUG [RS:0;bf0fec90ff6d:46239 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6dbf263f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bf0fec90ff6d/172.17.0.2:0 2024-12-10T15:35:55,435 INFO [RS:0;bf0fec90ff6d:46239 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-10T15:35:55,435 INFO [RS:0;bf0fec90ff6d:46239 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-10T15:35:55,435 DEBUG [RS:0;bf0fec90ff6d:46239 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-10T15:35:55,438 INFO [RS:0;bf0fec90ff6d:46239 {}] regionserver.HRegionServer(3073): reportForDuty to master=bf0fec90ff6d,33139,1733844951772 with isa=bf0fec90ff6d/172.17.0.2:46239, startcode=1733844953049 2024-12-10T15:35:55,452 DEBUG [RS:0;bf0fec90ff6d:46239 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T15:35:55,456 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-10T15:35:55,462 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-10T15:35:55,466 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
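The ZKWatcher(609) entries above show NodeCreated/NodeChildrenChanged events being delivered for /hbase/running once the master sets the cluster-up flag. A minimal sketch of observing the same znode with the plain ZooKeeper client (not HBase's ZKWatcher); the quorum string matches the log, everything else is illustrative.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;

    // Watches /hbase/running; creation of the znode fires a one-shot
    // NodeCreated event like the ones logged by ZKWatcher above.
    public class RunningZNodeWatch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:56346", 30_000, (WatchedEvent e) ->
            System.out.println("event=" + e.getType() + " path=" + e.getPath()));
        zk.exists("/hbase/running", true); // watch=true re-arms a one-shot watch
        Thread.sleep(60_000);              // keep the session alive to observe events
        zk.close();
      }
    }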
2024-12-10T15:35:55,472 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: bf0fec90ff6d,33139,1733844951772 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-10T15:35:55,476 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/bf0fec90ff6d:0, corePoolSize=5, maxPoolSize=5 2024-12-10T15:35:55,476 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/bf0fec90ff6d:0, corePoolSize=5, maxPoolSize=5 2024-12-10T15:35:55,476 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/bf0fec90ff6d:0, corePoolSize=5, maxPoolSize=5 2024-12-10T15:35:55,476 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/bf0fec90ff6d:0, corePoolSize=5, maxPoolSize=5 2024-12-10T15:35:55,477 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/bf0fec90ff6d:0, corePoolSize=10, maxPoolSize=10 2024-12-10T15:35:55,477 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/bf0fec90ff6d:0, corePoolSize=1, maxPoolSize=1 2024-12-10T15:35:55,477 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/bf0fec90ff6d:0, corePoolSize=2, maxPoolSize=2 2024-12-10T15:35:55,477 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/bf0fec90ff6d:0, corePoolSize=1, maxPoolSize=1 2024-12-10T15:35:55,511 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46513, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T15:35:55,515 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-10T15:35:55,516 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-10T15:35:55,518 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733844985518 2024-12-10T15:35:55,518 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33139 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:35:55,520 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-10T15:35:55,521 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-10T15:35:55,525 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:35:55,526 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-10T15:35:55,529 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-10T15:35:55,530 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-10T15:35:55,530 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-10T15:35:55,530 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-10T15:35:55,531 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
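The FSTableDescriptors(133) entry above prints the hbase:meta descriptor, including the 'info' family attributes (VERSIONS=3, DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOOMFILTER=ROWCOL, IN_MEMORY=true, BLOCKSIZE=8192). A sketch of building a user table with the same family settings via the public client API; the table name is made up, and this is not how the master itself writes the meta descriptor.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    // Builds a descriptor whose 'info' family mirrors the attributes
    // reported for hbase:meta in the log line above.
    public class MetaLikeDescriptor {
      public static TableDescriptor build() {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8192)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo_table")) // hypothetical table name
            .setColumnFamily(info)
            .build();
      }
    }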
2024-12-10T15:35:55,544 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-10T15:35:55,545 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-10T15:35:55,546 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-10T15:35:55,551 DEBUG [RS:0;bf0fec90ff6d:46239 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-10T15:35:55,551 WARN [RS:0;bf0fec90ff6d:46239 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-10T15:35:55,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741831_1007 (size=1039) 2024-12-10T15:35:55,557 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-10T15:35:55,557 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935 2024-12-10T15:35:55,559 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-10T15:35:55,560 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-10T15:35:55,563 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/bf0fec90ff6d:0:becomeActiveMaster-HFileCleaner.large.0-1733844955561,5,FailOnTimeoutGroup] 2024-12-10T15:35:55,567 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/bf0fec90ff6d:0:becomeActiveMaster-HFileCleaner.small.0-1733844955564,5,FailOnTimeoutGroup] 2024-12-10T15:35:55,567 INFO 
[master/bf0fec90ff6d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-10T15:35:55,568 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-10T15:35:55,569 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-10T15:35:55,570 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-10T15:35:55,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741832_1008 (size=32) 2024-12-10T15:35:55,652 INFO [RS:0;bf0fec90ff6d:46239 {}] regionserver.HRegionServer(3073): reportForDuty to master=bf0fec90ff6d,33139,1733844951772 with isa=bf0fec90ff6d/172.17.0.2:46239, startcode=1733844953049 2024-12-10T15:35:55,654 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33139 {}] master.ServerManager(332): Checking decommissioned status of RegionServer bf0fec90ff6d,46239,1733844953049 2024-12-10T15:35:55,656 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33139 {}] master.ServerManager(486): Registering regionserver=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:35:55,665 DEBUG [RS:0;bf0fec90ff6d:46239 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935 2024-12-10T15:35:55,665 DEBUG [RS:0;bf0fec90ff6d:46239 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:41507 2024-12-10T15:35:55,665 DEBUG [RS:0;bf0fec90ff6d:46239 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-10T15:35:55,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T15:35:55,673 DEBUG [RS:0;bf0fec90ff6d:46239 {}] zookeeper.ZKUtil(111): regionserver:46239-0x100109579e40001, quorum=127.0.0.1:56346, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/bf0fec90ff6d,46239,1733844953049 2024-12-10T15:35:55,674 WARN [RS:0;bf0fec90ff6d:46239 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-10T15:35:55,674 INFO [RS:0;bf0fec90ff6d:46239 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T15:35:55,674 DEBUG [RS:0;bf0fec90ff6d:46239 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/WALs/bf0fec90ff6d,46239,1733844953049 2024-12-10T15:35:55,676 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [bf0fec90ff6d,46239,1733844953049] 2024-12-10T15:35:55,687 DEBUG [RS:0;bf0fec90ff6d:46239 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-10T15:35:55,698 INFO [RS:0;bf0fec90ff6d:46239 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-10T15:35:55,710 INFO [RS:0;bf0fec90ff6d:46239 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-10T15:35:55,712 INFO [RS:0;bf0fec90ff6d:46239 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-10T15:35:55,712 INFO [RS:0;bf0fec90ff6d:46239 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T15:35:55,713 INFO [RS:0;bf0fec90ff6d:46239 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-10T15:35:55,720 INFO [RS:0;bf0fec90ff6d:46239 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
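The MemStoreFlusher(130) line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M. A sketch of the arithmetic, assuming the usual factors apply (hbase.regionserver.global.memstore.size for the global fraction of heap and hbase.regionserver.global.memstore.size.lower.limit, 0.95 by default, for the low mark); 880 M × 0.95 = 836 M is consistent with the logged values.

    // Reproduces the low-water-mark arithmetic for the values logged above.
    public class MemStoreLimits {
      public static void main(String[] args) {
        long globalLimitMb = 880;        // from the MemStoreFlusher line above
        double lowerLimitFactor = 0.95;  // assumed default lower-limit factor
        long lowMarkMb = (long) (globalLimitMb * lowerLimitFactor);
        System.out.println(lowMarkMb);   // 836
      }
    }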
2024-12-10T15:35:55,720 DEBUG [RS:0;bf0fec90ff6d:46239 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/bf0fec90ff6d:0, corePoolSize=1, maxPoolSize=1 2024-12-10T15:35:55,720 DEBUG [RS:0;bf0fec90ff6d:46239 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/bf0fec90ff6d:0, corePoolSize=1, maxPoolSize=1 2024-12-10T15:35:55,720 DEBUG [RS:0;bf0fec90ff6d:46239 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/bf0fec90ff6d:0, corePoolSize=1, maxPoolSize=1 2024-12-10T15:35:55,720 DEBUG [RS:0;bf0fec90ff6d:46239 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0, corePoolSize=1, maxPoolSize=1 2024-12-10T15:35:55,721 DEBUG [RS:0;bf0fec90ff6d:46239 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/bf0fec90ff6d:0, corePoolSize=1, maxPoolSize=1 2024-12-10T15:35:55,721 DEBUG [RS:0;bf0fec90ff6d:46239 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/bf0fec90ff6d:0, corePoolSize=2, maxPoolSize=2 2024-12-10T15:35:55,721 DEBUG [RS:0;bf0fec90ff6d:46239 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/bf0fec90ff6d:0, corePoolSize=1, maxPoolSize=1 2024-12-10T15:35:55,721 DEBUG [RS:0;bf0fec90ff6d:46239 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/bf0fec90ff6d:0, corePoolSize=1, maxPoolSize=1 2024-12-10T15:35:55,721 DEBUG [RS:0;bf0fec90ff6d:46239 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/bf0fec90ff6d:0, corePoolSize=1, maxPoolSize=1 2024-12-10T15:35:55,721 DEBUG [RS:0;bf0fec90ff6d:46239 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/bf0fec90ff6d:0, corePoolSize=1, maxPoolSize=1 2024-12-10T15:35:55,721 DEBUG [RS:0;bf0fec90ff6d:46239 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/bf0fec90ff6d:0, corePoolSize=1, maxPoolSize=1 2024-12-10T15:35:55,722 DEBUG [RS:0;bf0fec90ff6d:46239 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/bf0fec90ff6d:0, corePoolSize=3, maxPoolSize=3 2024-12-10T15:35:55,722 DEBUG [RS:0;bf0fec90ff6d:46239 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0, corePoolSize=3, maxPoolSize=3 2024-12-10T15:35:55,722 INFO [RS:0;bf0fec90ff6d:46239 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T15:35:55,723 INFO [RS:0;bf0fec90ff6d:46239 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-10T15:35:55,723 INFO [RS:0;bf0fec90ff6d:46239 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-10T15:35:55,723 INFO [RS:0;bf0fec90ff6d:46239 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-10T15:35:55,723 INFO [RS:0;bf0fec90ff6d:46239 {}] hbase.ChoreService(168): Chore ScheduledChore name=bf0fec90ff6d,46239,1733844953049-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-10T15:35:55,750 INFO [RS:0;bf0fec90ff6d:46239 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-10T15:35:55,753 INFO [RS:0;bf0fec90ff6d:46239 {}] hbase.ChoreService(168): Chore ScheduledChore name=bf0fec90ff6d,46239,1733844953049-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T15:35:55,779 INFO [RS:0;bf0fec90ff6d:46239 {}] regionserver.Replication(204): bf0fec90ff6d,46239,1733844953049 started 2024-12-10T15:35:55,779 INFO [RS:0;bf0fec90ff6d:46239 {}] regionserver.HRegionServer(1767): Serving as bf0fec90ff6d,46239,1733844953049, RpcServer on bf0fec90ff6d/172.17.0.2:46239, sessionid=0x100109579e40001 2024-12-10T15:35:55,780 DEBUG [RS:0;bf0fec90ff6d:46239 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-10T15:35:55,780 DEBUG [RS:0;bf0fec90ff6d:46239 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager bf0fec90ff6d,46239,1733844953049 2024-12-10T15:35:55,780 DEBUG [RS:0;bf0fec90ff6d:46239 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bf0fec90ff6d,46239,1733844953049' 2024-12-10T15:35:55,780 DEBUG [RS:0;bf0fec90ff6d:46239 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-10T15:35:55,781 DEBUG [RS:0;bf0fec90ff6d:46239 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-10T15:35:55,782 DEBUG [RS:0;bf0fec90ff6d:46239 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-10T15:35:55,782 DEBUG [RS:0;bf0fec90ff6d:46239 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-10T15:35:55,782 DEBUG [RS:0;bf0fec90ff6d:46239 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager bf0fec90ff6d,46239,1733844953049 2024-12-10T15:35:55,782 DEBUG [RS:0;bf0fec90ff6d:46239 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'bf0fec90ff6d,46239,1733844953049' 2024-12-10T15:35:55,782 DEBUG [RS:0;bf0fec90ff6d:46239 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-10T15:35:55,783 DEBUG [RS:0;bf0fec90ff6d:46239 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-10T15:35:55,784 DEBUG [RS:0;bf0fec90ff6d:46239 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-10T15:35:55,784 INFO [RS:0;bf0fec90ff6d:46239 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-10T15:35:55,784 INFO [RS:0;bf0fec90ff6d:46239 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-10T15:35:55,889 INFO [RS:0;bf0fec90ff6d:46239 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-10T15:35:55,892 INFO [RS:0;bf0fec90ff6d:46239 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bf0fec90ff6d%2C46239%2C1733844953049, suffix=, logDir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/WALs/bf0fec90ff6d,46239,1733844953049, archiveDir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/oldWALs, maxLogs=32 2024-12-10T15:35:55,909 DEBUG [RS:0;bf0fec90ff6d:46239 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/WALs/bf0fec90ff6d,46239,1733844953049/bf0fec90ff6d%2C46239%2C1733844953049.1733844955894, exclude list is [], retry=0 2024-12-10T15:35:55,917 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46053,DS-0cfdb285-2980-4de8-a60b-2fefc415198e,DISK] 2024-12-10T15:35:55,970 INFO [RS:0;bf0fec90ff6d:46239 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/WALs/bf0fec90ff6d,46239,1733844953049/bf0fec90ff6d%2C46239%2C1733844953049.1733844955894 2024-12-10T15:35:55,980 DEBUG [RS:0;bf0fec90ff6d:46239 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36033:36033)] 2024-12-10T15:35:55,988 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T15:35:56,015 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-10T15:35:56,035 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-10T15:35:56,036 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:35:56,042 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T15:35:56,043 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-10T15:35:56,056 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-10T15:35:56,056 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:35:56,058 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T15:35:56,059 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-10T15:35:56,071 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-10T15:35:56,071 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:35:56,075 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T15:35:56,080 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/hbase/meta/1588230740 2024-12-10T15:35:56,092 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/hbase/meta/1588230740 2024-12-10T15:35:56,104 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table 
hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-10T15:35:56,122 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-10T15:35:56,143 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T15:35:56,147 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66290935, jitterRate=-0.01218809187412262}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T15:35:56,151 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-10T15:35:56,151 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-10T15:35:56,151 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-10T15:35:56,151 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-10T15:35:56,151 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-10T15:35:56,151 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-10T15:35:56,162 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-10T15:35:56,162 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-10T15:35:56,174 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-10T15:35:56,174 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-10T15:35:56,181 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-10T15:35:56,203 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-10T15:35:56,212 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-10T15:35:56,371 DEBUG [bf0fec90ff6d:33139 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-10T15:35:56,376 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:35:56,382 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bf0fec90ff6d,46239,1733844953049, state=OPENING 2024-12-10T15:35:56,429 DEBUG [PEWorker-5 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-10T15:35:56,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:46239-0x100109579e40001, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T15:35:56,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T15:35:56,441 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T15:35:56,441 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T15:35:56,443 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=bf0fec90ff6d,46239,1733844953049}] 2024-12-10T15:35:56,623 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:35:56,625 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-10T15:35:56,630 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50706, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-10T15:35:56,665 INFO [RS_OPEN_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-10T15:35:56,666 INFO [RS_OPEN_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-10T15:35:56,667 INFO [RS_OPEN_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-10T15:35:56,676 INFO [RS_OPEN_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=bf0fec90ff6d%2C46239%2C1733844953049.meta, suffix=.meta, logDir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/WALs/bf0fec90ff6d,46239,1733844953049, archiveDir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/oldWALs, maxLogs=32 2024-12-10T15:35:56,696 DEBUG [RS_OPEN_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/WALs/bf0fec90ff6d,46239,1733844953049/bf0fec90ff6d%2C46239%2C1733844953049.meta.1733844956678.meta, exclude list is [], retry=0 2024-12-10T15:35:56,705 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46053,DS-0cfdb285-2980-4de8-a60b-2fefc415198e,DISK] 2024-12-10T15:35:56,715 INFO [RS_OPEN_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/WALs/bf0fec90ff6d,46239,1733844953049/bf0fec90ff6d%2C46239%2C1733844953049.meta.1733844956678.meta 2024-12-10T15:35:56,719 DEBUG 
[RS_OPEN_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36033:36033)] 2024-12-10T15:35:56,720 DEBUG [RS_OPEN_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-10T15:35:56,722 DEBUG [RS_OPEN_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-10T15:35:56,799 DEBUG [RS_OPEN_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-10T15:35:56,805 INFO [RS_OPEN_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-10T15:35:56,810 DEBUG [RS_OPEN_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-10T15:35:56,810 DEBUG [RS_OPEN_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T15:35:56,810 DEBUG [RS_OPEN_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-10T15:35:56,811 DEBUG [RS_OPEN_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-10T15:35:56,818 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-10T15:35:56,820 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-10T15:35:56,820 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:35:56,821 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T15:35:56,822 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-10T15:35:56,823 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-10T15:35:56,824 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:35:56,825 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T15:35:56,826 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-10T15:35:56,831 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-10T15:35:56,831 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:35:56,833 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-10T15:35:56,835 DEBUG [RS_OPEN_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/hbase/meta/1588230740 2024-12-10T15:35:56,839 DEBUG [RS_OPEN_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_META, 
pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/hbase/meta/1588230740 2024-12-10T15:35:56,842 DEBUG [RS_OPEN_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-10T15:35:56,845 DEBUG [RS_OPEN_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-10T15:35:56,847 INFO [RS_OPEN_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69044570, jitterRate=0.02884426712989807}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T15:35:56,848 DEBUG [RS_OPEN_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-10T15:35:56,860 INFO [RS_OPEN_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733844956616 2024-12-10T15:35:56,875 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:35:56,876 DEBUG [RS_OPEN_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-10T15:35:56,876 INFO [RS_OPEN_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-10T15:35:56,878 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as bf0fec90ff6d,46239,1733844953049, state=OPEN 2024-12-10T15:35:56,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x100109579e40001, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T15:35:56,942 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-10T15:35:56,942 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T15:35:56,942 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-10T15:35:56,952 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-10T15:35:56,952 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=bf0fec90ff6d,46239,1733844953049 in 499 msec 2024-12-10T15:35:56,963 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-10T15:35:56,963 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 773 msec 2024-12-10T15:35:56,976 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.5730 sec 2024-12-10T15:35:56,977 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733844956977, completionTime=-1 2024-12-10T15:35:56,977 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-10T15:35:56,977 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-10T15:35:57,038 DEBUG [hconnection-0xddc02ee-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:35:57,044 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50718, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:35:57,059 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-10T15:35:57,059 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733845017059 2024-12-10T15:35:57,060 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733845077060 2024-12-10T15:35:57,060 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 82 msec 2024-12-10T15:35:57,118 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bf0fec90ff6d,33139,1733844951772-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-10T15:35:57,118 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bf0fec90ff6d,33139,1733844951772-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T15:35:57,119 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bf0fec90ff6d,33139,1733844951772-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T15:35:57,120 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-bf0fec90ff6d:33139, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T15:35:57,121 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-10T15:35:57,126 DEBUG [master/bf0fec90ff6d:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-10T15:35:57,131 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
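The entries above show hbase:meta coming online on bf0fec90ff6d,46239,1733844953049 and its location being published to ZooKeeper under /hbase/meta-region-server. A minimal client-side sketch of resolving that location through the public 2.x API (illustrative only, assuming a reachable cluster configuration on the classpath; the class name is made up for the example):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocationCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          // Resolved through the connection registry, i.e. the same
          // /hbase/meta-region-server data the master just updated.
          HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
          System.out.println("hbase:meta is served by " + loc.getServerName());
        }
      }
    }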
2024-12-10T15:35:57,133 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-10T15:35:57,145 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-10T15:35:57,155 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T15:35:57,157 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:35:57,171 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T15:35:57,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741835_1011 (size=358) 2024-12-10T15:35:57,212 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => be7c8615eacea7669b98cff7543a195b, NAME => 'hbase:namespace,,1733844957132.be7c8615eacea7669b98cff7543a195b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935 2024-12-10T15:35:57,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741836_1012 (size=42) 2024-12-10T15:35:57,648 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733844957132.be7c8615eacea7669b98cff7543a195b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T15:35:57,648 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing be7c8615eacea7669b98cff7543a195b, disabling compactions & flushes 2024-12-10T15:35:57,648 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733844957132.be7c8615eacea7669b98cff7543a195b. 2024-12-10T15:35:57,648 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733844957132.be7c8615eacea7669b98cff7543a195b. 2024-12-10T15:35:57,648 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733844957132.be7c8615eacea7669b98cff7543a195b. 
after waiting 0 ms 2024-12-10T15:35:57,648 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733844957132.be7c8615eacea7669b98cff7543a195b. 2024-12-10T15:35:57,649 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733844957132.be7c8615eacea7669b98cff7543a195b. 2024-12-10T15:35:57,649 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for be7c8615eacea7669b98cff7543a195b: 2024-12-10T15:35:57,653 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T15:35:57,661 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733844957132.be7c8615eacea7669b98cff7543a195b.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733844957655"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733844957655"}]},"ts":"1733844957655"} 2024-12-10T15:35:57,685 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-10T15:35:57,688 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T15:35:57,691 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733844957688"}]},"ts":"1733844957688"} 2024-12-10T15:35:57,697 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-10T15:35:57,750 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=be7c8615eacea7669b98cff7543a195b, ASSIGN}] 2024-12-10T15:35:57,754 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=be7c8615eacea7669b98cff7543a195b, ASSIGN 2024-12-10T15:35:57,756 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=be7c8615eacea7669b98cff7543a195b, ASSIGN; state=OFFLINE, location=bf0fec90ff6d,46239,1733844953049; forceNewPlan=false, retain=false 2024-12-10T15:35:57,907 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=be7c8615eacea7669b98cff7543a195b, regionState=OPENING, regionLocation=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:35:57,913 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure be7c8615eacea7669b98cff7543a195b, server=bf0fec90ff6d,46239,1733844953049}] 2024-12-10T15:35:58,069 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:35:58,078 INFO [RS_OPEN_PRIORITY_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1733844957132.be7c8615eacea7669b98cff7543a195b. 2024-12-10T15:35:58,078 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => be7c8615eacea7669b98cff7543a195b, NAME => 'hbase:namespace,,1733844957132.be7c8615eacea7669b98cff7543a195b.', STARTKEY => '', ENDKEY => ''} 2024-12-10T15:35:58,079 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace be7c8615eacea7669b98cff7543a195b 2024-12-10T15:35:58,079 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733844957132.be7c8615eacea7669b98cff7543a195b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T15:35:58,080 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for be7c8615eacea7669b98cff7543a195b 2024-12-10T15:35:58,080 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for be7c8615eacea7669b98cff7543a195b 2024-12-10T15:35:58,090 INFO [StoreOpener-be7c8615eacea7669b98cff7543a195b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region be7c8615eacea7669b98cff7543a195b 2024-12-10T15:35:58,093 INFO [StoreOpener-be7c8615eacea7669b98cff7543a195b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region be7c8615eacea7669b98cff7543a195b columnFamilyName info 2024-12-10T15:35:58,094 DEBUG [StoreOpener-be7c8615eacea7669b98cff7543a195b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:35:58,094 INFO [StoreOpener-be7c8615eacea7669b98cff7543a195b-1 {}] regionserver.HStore(327): Store=be7c8615eacea7669b98cff7543a195b/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T15:35:58,096 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/hbase/namespace/be7c8615eacea7669b98cff7543a195b 2024-12-10T15:35:58,097 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/hbase/namespace/be7c8615eacea7669b98cff7543a195b 2024-12-10T15:35:58,100 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for be7c8615eacea7669b98cff7543a195b 2024-12-10T15:35:58,104 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/hbase/namespace/be7c8615eacea7669b98cff7543a195b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T15:35:58,105 INFO [RS_OPEN_PRIORITY_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened be7c8615eacea7669b98cff7543a195b; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62925135, jitterRate=-0.06234242022037506}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-10T15:35:58,106 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for be7c8615eacea7669b98cff7543a195b: 2024-12-10T15:35:58,108 INFO [RS_OPEN_PRIORITY_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733844957132.be7c8615eacea7669b98cff7543a195b., pid=6, masterSystemTime=1733844958069 2024-12-10T15:35:58,113 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733844957132.be7c8615eacea7669b98cff7543a195b. 2024-12-10T15:35:58,113 INFO [RS_OPEN_PRIORITY_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733844957132.be7c8615eacea7669b98cff7543a195b. 
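With hbase:namespace open, namespace metadata is served through the usual Admin API; the built-in default and hbase namespaces are created by the CreateNamespaceProcedure entries that follow. An illustrative sketch of listing them from a client (not the test's own code):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListNamespaces {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Once master initialization finishes, "default" and "hbase" should both appear.
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println(ns.getName());
          }
        }
      }
    }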
2024-12-10T15:35:58,113 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=be7c8615eacea7669b98cff7543a195b, regionState=OPEN, openSeqNum=2, regionLocation=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:35:58,122 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-10T15:35:58,123 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure be7c8615eacea7669b98cff7543a195b, server=bf0fec90ff6d,46239,1733844953049 in 203 msec 2024-12-10T15:35:58,125 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-10T15:35:58,126 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=be7c8615eacea7669b98cff7543a195b, ASSIGN in 372 msec 2024-12-10T15:35:58,127 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T15:35:58,127 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733844958127"}]},"ts":"1733844958127"} 2024-12-10T15:35:58,135 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-10T15:35:58,181 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-10T15:35:58,181 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T15:35:58,186 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.0480 sec 2024-12-10T15:35:58,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x100109579e40001, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T15:35:58,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-10T15:35:58,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T15:35:58,228 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-10T15:35:58,256 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-10T15:35:58,271 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 48 msec 2024-12-10T15:35:58,286 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-10T15:35:58,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-10T15:35:58,380 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 84 msec 2024-12-10T15:35:58,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-10T15:35:58,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-10T15:35:58,429 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 5.178sec 2024-12-10T15:35:58,431 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-10T15:35:58,433 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-10T15:35:58,434 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-10T15:35:58,441 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-10T15:35:58,441 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-10T15:35:58,443 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bf0fec90ff6d,33139,1733844951772-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-10T15:35:58,451 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bf0fec90ff6d,33139,1733844951772-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-10T15:35:58,534 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x76523d14 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@46873e4f 2024-12-10T15:35:58,538 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-10T15:35:58,539 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-10T15:35:58,541 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 
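The WARN above flags ZKConnectionRegistry as deprecated and points at the book section for the RPC-based registry. A hedged sketch of opting a client into that registry; the registry implementation key is the standard one, but the bootstrap-servers key name and the bf0fec90ff6d:33139 endpoint are assumptions to verify against the linked section:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RpcRegistryClient {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Use the RPC-based connection registry instead of the deprecated ZK-based one.
        conf.set("hbase.client.registry.impl",
            "org.apache.hadoop.hbase.client.RpcConnectionRegistry");
        // Assumed key for the bootstrap endpoints (here, the master RPC address from this log);
        // confirm the exact property name in the referenced book section.
        conf.set("hbase.client.bootstrap.servers", "bf0fec90ff6d:33139");
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          System.out.println("registry in use: " + conf.get("hbase.client.registry.impl"));
        }
      }
    }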
2024-12-10T15:35:58,541 INFO [master/bf0fec90ff6d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=bf0fec90ff6d,33139,1733844951772-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-10T15:35:58,609 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76ba07, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:35:58,620 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-10T15:35:58,620 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-10T15:35:58,685 DEBUG [hconnection-0x7edf53b1-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:35:58,720 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50730, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:35:58,739 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=bf0fec90ff6d,33139,1733844951772 2024-12-10T15:35:58,787 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=218, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=918, ProcessCount=11, AvailableMemoryMB=2169 2024-12-10T15:35:58,801 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-10T15:35:58,835 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43996, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-10T15:35:58,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
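The TableDescriptorChecker warning above fires because the flush size in effect for the table about to be created is 131072 bytes (128 KB), far below the usual default, so the region will flush very frequently; for an ACID-guarantees test that is the point. An illustrative sketch of how such a per-table flush size can be expressed with the descriptor builder (whether the test sets it per table or via hbase.hregion.memstore.flush.size in the mini-cluster configuration, the same sanity check applies):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SmallFlushSizeDescriptor {
      public static void main(String[] args) {
        // MEMSTORE_FLUSHSIZE on the descriptor overrides hbase.hregion.memstore.flush.size.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setMemStoreFlushSize(128 * 1024)                       // 131072, as in the warning
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A")) // placeholder family
            .build();
        System.out.println(td);
      }
    }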
2024-12-10T15:35:58,850 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T15:35:58,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-10T15:35:58,873 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T15:35:58,874 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:35:58,875 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-12-10T15:35:58,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-10T15:35:58,933 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T15:35:59,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741837_1013 (size=963) 2024-12-10T15:35:59,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-10T15:35:59,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-10T15:35:59,400 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935 2024-12-10T15:35:59,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741838_1014 (size=53) 2024-12-10T15:35:59,421 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T15:35:59,421 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 8a8b39bcfc5042b2f61256808771f62a, disabling compactions & flushes 2024-12-10T15:35:59,421 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:35:59,421 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:35:59,421 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. after waiting 0 ms 2024-12-10T15:35:59,421 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:35:59,421 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:35:59,421 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:35:59,423 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T15:35:59,424 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733844959423"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733844959423"}]},"ts":"1733844959423"} 2024-12-10T15:35:59,426 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
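The create request above carries the table-level metadata key hbase.hregion.compacting.memstore.type => ADAPTIVE, which is why each store later reports memstore type=CompactingMemStore with compactor=ADAPTIVE. An approximate sketch of building an equivalent descriptor with the public builder API (not the test's own helper code):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAdaptiveTable {
      public static void main(String[] args) throws Exception {
        TableDescriptorBuilder builder = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // Table-level metadata seen in the log; enables adaptive in-memory compaction.
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
        for (String family : new String[] {"A", "B", "C"}) {
          builder.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)        // VERSIONS => '1'
              .setBlocksize(64 * 1024)  // BLOCKSIZE => '65536 B (64KB)'
              .build());
        }
        TableDescriptor td = builder.build();
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(td); // drives a CreateTableProcedure like pid=9 above
        }
      }
    }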
2024-12-10T15:35:59,429 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T15:35:59,429 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733844959429"}]},"ts":"1733844959429"} 2024-12-10T15:35:59,433 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-10T15:35:59,460 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8a8b39bcfc5042b2f61256808771f62a, ASSIGN}] 2024-12-10T15:35:59,463 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8a8b39bcfc5042b2f61256808771f62a, ASSIGN 2024-12-10T15:35:59,466 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=8a8b39bcfc5042b2f61256808771f62a, ASSIGN; state=OFFLINE, location=bf0fec90ff6d,46239,1733844953049; forceNewPlan=false, retain=false 2024-12-10T15:35:59,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-10T15:35:59,617 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=8a8b39bcfc5042b2f61256808771f62a, regionState=OPENING, regionLocation=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:35:59,621 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049}] 2024-12-10T15:35:59,775 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:35:59,781 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 
2024-12-10T15:35:59,781 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} 2024-12-10T15:35:59,781 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:35:59,782 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T15:35:59,782 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:35:59,782 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:35:59,784 INFO [StoreOpener-8a8b39bcfc5042b2f61256808771f62a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:35:59,815 INFO [StoreOpener-8a8b39bcfc5042b2f61256808771f62a-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T15:35:59,815 INFO [StoreOpener-8a8b39bcfc5042b2f61256808771f62a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8a8b39bcfc5042b2f61256808771f62a columnFamilyName A 2024-12-10T15:35:59,815 DEBUG [StoreOpener-8a8b39bcfc5042b2f61256808771f62a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:35:59,817 INFO [StoreOpener-8a8b39bcfc5042b2f61256808771f62a-1 {}] regionserver.HStore(327): Store=8a8b39bcfc5042b2f61256808771f62a/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T15:35:59,817 INFO [StoreOpener-8a8b39bcfc5042b2f61256808771f62a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:35:59,819 INFO [StoreOpener-8a8b39bcfc5042b2f61256808771f62a-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T15:35:59,820 INFO [StoreOpener-8a8b39bcfc5042b2f61256808771f62a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8a8b39bcfc5042b2f61256808771f62a columnFamilyName B 2024-12-10T15:35:59,820 DEBUG [StoreOpener-8a8b39bcfc5042b2f61256808771f62a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:35:59,821 INFO [StoreOpener-8a8b39bcfc5042b2f61256808771f62a-1 {}] regionserver.HStore(327): Store=8a8b39bcfc5042b2f61256808771f62a/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T15:35:59,821 INFO [StoreOpener-8a8b39bcfc5042b2f61256808771f62a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:35:59,826 INFO [StoreOpener-8a8b39bcfc5042b2f61256808771f62a-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T15:35:59,826 INFO [StoreOpener-8a8b39bcfc5042b2f61256808771f62a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8a8b39bcfc5042b2f61256808771f62a columnFamilyName C 2024-12-10T15:35:59,826 DEBUG [StoreOpener-8a8b39bcfc5042b2f61256808771f62a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:35:59,827 INFO [StoreOpener-8a8b39bcfc5042b2f61256808771f62a-1 {}] regionserver.HStore(327): Store=8a8b39bcfc5042b2f61256808771f62a/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T15:35:59,828 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:35:59,830 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:35:59,830 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:35:59,834 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-10T15:35:59,839 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:35:59,843 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T15:35:59,845 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 8a8b39bcfc5042b2f61256808771f62a; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69953885, jitterRate=0.04239411652088165}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T15:35:59,846 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:35:59,848 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a., pid=11, masterSystemTime=1733844959774 2024-12-10T15:35:59,851 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:35:59,852 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 
2024-12-10T15:35:59,853 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=8a8b39bcfc5042b2f61256808771f62a, regionState=OPEN, openSeqNum=2, regionLocation=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:35:59,862 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-10T15:35:59,862 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 in 236 msec 2024-12-10T15:35:59,873 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T15:35:59,873 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733844959873"}]},"ts":"1733844959873"} 2024-12-10T15:35:59,874 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-10T15:35:59,874 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=8a8b39bcfc5042b2f61256808771f62a, ASSIGN in 403 msec 2024-12-10T15:35:59,876 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-10T15:35:59,892 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T15:35:59,900 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.0390 sec 2024-12-10T15:36:00,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-10T15:36:00,042 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-12-10T15:36:00,047 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2e67f019 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6fcb5f29 2024-12-10T15:36:00,064 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7fdf5682, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:36:00,071 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:36:00,084 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36364, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:36:00,096 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-10T15:36:00,105 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35156, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-10T15:36:00,121 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x79d38d10 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6f343a4d 2024-12-10T15:36:00,141 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12885408, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:36:00,142 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6c63ae4e to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@22cb07dd 2024-12-10T15:36:00,155 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62c43377, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:36:00,156 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x736f1673 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@478bae6b 2024-12-10T15:36:00,182 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4977266, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:36:00,184 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4ee2166f to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5400112e 2024-12-10T15:36:00,210 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a8f4734, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:36:00,212 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3f34ff67 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@38766d64 2024-12-10T15:36:00,230 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18603bb9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:36:00,232 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4b5cad1a to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@295cb1ac 2024-12-10T15:36:00,256 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72e97e4b, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:36:00,257 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c3b736e to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@70267494 2024-12-10T15:36:00,305 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@490457fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:36:00,307 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x767a8485 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1d2a8e08 2024-12-10T15:36:00,324 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c8de680, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:36:00,329 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6502d571 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c915d17 2024-12-10T15:36:00,340 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f6b07e3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:36:00,363 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:36:00,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-12-10T15:36:00,369 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:36:00,371 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:36:00,371 DEBUG [hconnection-0x2216ae23-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:36:00,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-10T15:36:00,372 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:36:00,375 DEBUG 
[hconnection-0x5d4164ec-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:36:00,377 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36368, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:36:00,380 DEBUG [hconnection-0x3d0fdd06-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:36:00,382 DEBUG [hconnection-0x40eb5aeb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:36:00,383 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36378, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:36:00,403 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36394, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:36:00,419 DEBUG [hconnection-0x19cc3c4a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:36:00,428 DEBUG [hconnection-0x14df392f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:36:00,432 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36406, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:36:00,451 DEBUG [hconnection-0x5f7b91cc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:36:00,461 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36412, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:36:00,463 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8a8b39bcfc5042b2f61256808771f62a 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T15:36:00,467 DEBUG [hconnection-0x84e7a38-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:36:00,469 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=A 2024-12-10T15:36:00,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:00,470 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=B 2024-12-10T15:36:00,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:00,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:36:00,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=C 2024-12-10T15:36:00,471 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:00,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-10T15:36:00,504 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36428, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:36:00,504 DEBUG [hconnection-0x685e87a2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:36:00,509 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36442, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:36:00,510 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36422, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:36:00,521 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36446, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:36:00,541 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:00,544 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-10T15:36:00,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:00,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:00,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:00,553 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:00,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:00,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:00,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-10T15:36:00,736 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/c259330c349c4dac95a47ad661d2af66 is 50, key is test_row_0/A:col10/1733844960432/Put/seqid=0 2024-12-10T15:36:00,739 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:00,740 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-10T15:36:00,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:00,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:00,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:00,744 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:00,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:00,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:00,786 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:00,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845020744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:00,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:00,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845020771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:00,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:00,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845020777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:00,808 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:00,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845020789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:00,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:00,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845020790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:00,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741839_1015 (size=12001) 2024-12-10T15:36:00,898 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:00,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845020893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:00,905 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:00,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845020903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:00,908 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:00,916 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-10T15:36:00,920 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:00,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845020916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:00,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:00,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845020918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:00,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:00,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845020920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:00,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:00,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:00,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:00,934 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:00,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:00,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:00,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-10T15:36:01,089 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:01,090 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-10T15:36:01,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:01,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:01,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:01,091 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:01,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:01,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:01,157 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:01,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845021154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:01,166 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:01,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845021156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:01,167 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:01,167 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:01,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845021157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:01,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845021160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:01,168 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:01,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845021162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:01,245 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:01,248 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/c259330c349c4dac95a47ad661d2af66 2024-12-10T15:36:01,248 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-10T15:36:01,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:01,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:01,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:01,254 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:01,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:01,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:01,410 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:01,411 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-10T15:36:01,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:01,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:01,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:01,412 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:01,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:01,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:01,485 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:01,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845021478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:01,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-10T15:36:01,490 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:01,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845021478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:01,493 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:01,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845021478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:01,498 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/23125e145e97451f8644260d1a3ac40f is 50, key is test_row_0/B:col10/1733844960432/Put/seqid=0 2024-12-10T15:36:01,504 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:01,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845021492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:01,511 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:01,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845021505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:01,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741840_1016 (size=12001) 2024-12-10T15:36:01,567 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/23125e145e97451f8644260d1a3ac40f 2024-12-10T15:36:01,575 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:01,576 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-10T15:36:01,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:01,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:01,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:01,577 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:01,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:01,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:01,630 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/6a319f6f1c1544c58f26864732f1f9d4 is 50, key is test_row_0/C:col10/1733844960432/Put/seqid=0 2024-12-10T15:36:01,694 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-10T15:36:01,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741841_1017 (size=12001) 2024-12-10T15:36:01,699 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-10T15:36:01,708 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-10T15:36:01,733 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:01,734 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-10T15:36:01,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:01,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:01,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:01,734 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:01,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:01,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:01,891 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:01,892 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-10T15:36:01,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:01,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:01,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:01,893 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:01,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:01,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:02,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:02,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845022013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:02,020 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:02,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845022019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:02,026 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:02,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845022025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:02,029 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:02,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845022025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:02,034 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:02,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845022032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:02,047 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:02,048 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-10T15:36:02,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:02,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:02,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:02,049 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:02,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:02,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:02,097 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/6a319f6f1c1544c58f26864732f1f9d4 2024-12-10T15:36:02,111 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/c259330c349c4dac95a47ad661d2af66 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/c259330c349c4dac95a47ad661d2af66 2024-12-10T15:36:02,131 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/c259330c349c4dac95a47ad661d2af66, entries=150, sequenceid=12, filesize=11.7 K 2024-12-10T15:36:02,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/23125e145e97451f8644260d1a3ac40f as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/23125e145e97451f8644260d1a3ac40f 2024-12-10T15:36:02,163 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/23125e145e97451f8644260d1a3ac40f, entries=150, sequenceid=12, 
filesize=11.7 K 2024-12-10T15:36:02,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/6a319f6f1c1544c58f26864732f1f9d4 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/6a319f6f1c1544c58f26864732f1f9d4 2024-12-10T15:36:02,185 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/6a319f6f1c1544c58f26864732f1f9d4, entries=150, sequenceid=12, filesize=11.7 K 2024-12-10T15:36:02,188 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 8a8b39bcfc5042b2f61256808771f62a in 1726ms, sequenceid=12, compaction requested=false 2024-12-10T15:36:02,188 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:02,211 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:02,216 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-10T15:36:02,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 
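The RegionTooBusyException records above all report the same blocking threshold, "Over memstore limit=512.0 K". HBase blocks new writes to a region once its memstore exceeds hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; HRegion.checkResources() then throws RegionTooBusyException until a flush like the one finishing above drains the memstore. A minimal Java sketch of that arithmetic, assuming a 128 K test flush size and the default multiplier of 4 (the exact values this test run configures are an assumption here):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Hedged sketch: reproduces the 512.0 K blocking limit reported in the log,
    // assuming hbase.hregion.memstore.flush.size = 128 K and the default
    // hbase.hregion.memstore.block.multiplier of 4.
    public final class MemstoreBlockingLimit {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // assumed test value
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // HBase default

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 0L);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = flushSize * multiplier;                   // 524288 bytes = 512.0 K

        System.out.println("memstore blocking limit = " + blockingLimit + " bytes");
      }
    }

Raising either setting moves the threshold, at the cost of a larger per-region memory spike before writes are blocked.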
2024-12-10T15:36:02,217 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 8a8b39bcfc5042b2f61256808771f62a 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-10T15:36:02,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=A 2024-12-10T15:36:02,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:02,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=B 2024-12-10T15:36:02,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:02,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=C 2024-12-10T15:36:02,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:02,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/215d7470eb4d4386a919b1e4de1bd1e5 is 50, key is test_row_0/A:col10/1733844960706/Put/seqid=0 2024-12-10T15:36:02,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741842_1018 (size=12001) 2024-12-10T15:36:02,315 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/215d7470eb4d4386a919b1e4de1bd1e5 2024-12-10T15:36:02,323 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-10T15:36:02,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/d385beb5cc0443d1b0c2dffbe92c1f39 is 50, key is test_row_0/B:col10/1733844960706/Put/seqid=0 2024-12-10T15:36:02,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741843_1019 (size=12001) 2024-12-10T15:36:02,395 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=37 (bloomFilter=true), 
to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/d385beb5cc0443d1b0c2dffbe92c1f39 2024-12-10T15:36:02,447 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-10T15:36:02,462 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-10T15:36:02,464 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-10T15:36:02,464 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-10T15:36:02,465 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-10T15:36:02,465 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-10T15:36:02,465 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-10T15:36:02,465 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-10T15:36:02,466 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-10T15:36:02,466 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-10T15:36:02,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-10T15:36:02,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/adf65e92515a4f56898699def1547296 is 50, key is test_row_0/C:col10/1733844960706/Put/seqid=0 2024-12-10T15:36:02,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741844_1020 (size=12001) 2024-12-10T15:36:02,959 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/adf65e92515a4f56898699def1547296 
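At this point the flush has written its A, B, and C files into the region's .tmp directory, while the handlers further down keep rejecting Mutate calls with RegionTooBusyException until the memstore drains. The stock HBase client treats this as a retryable exception and backs off on its own; purely as an illustration of that contract (the method name and timings below are assumptions, not code from this test), an explicit retry loop looks like:

    import java.io.IOException;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    // Hedged sketch: bounded retries with a simple linear backoff for
    // RegionTooBusyException. In practice the HBase client's own retry settings
    // (hbase.client.retries.number, hbase.client.pause) already cover this.
    final class RetryingPut {
      static void putWithRetry(Table table, Put put, int maxAttempts)
          throws IOException, InterruptedException {
        for (int attempt = 1; ; attempt++) {
          try {
            table.put(put);                  // the Mutate call the handlers are rejecting
            return;
          } catch (RegionTooBusyException e) {
            if (attempt >= maxAttempts) {
              throw e;                       // give up after maxAttempts tries
            }
            Thread.sleep(100L * attempt);    // back off while the flush drains the memstore
          }
        }
      }
    }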
2024-12-10T15:36:03,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/215d7470eb4d4386a919b1e4de1bd1e5 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/215d7470eb4d4386a919b1e4de1bd1e5 2024-12-10T15:36:03,038 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:03,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:36:03,042 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/215d7470eb4d4386a919b1e4de1bd1e5, entries=150, sequenceid=37, filesize=11.7 K 2024-12-10T15:36:03,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/d385beb5cc0443d1b0c2dffbe92c1f39 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/d385beb5cc0443d1b0c2dffbe92c1f39 2024-12-10T15:36:03,069 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/d385beb5cc0443d1b0c2dffbe92c1f39, entries=150, sequenceid=37, filesize=11.7 K 2024-12-10T15:36:03,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/adf65e92515a4f56898699def1547296 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/adf65e92515a4f56898699def1547296 2024-12-10T15:36:03,096 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/adf65e92515a4f56898699def1547296, entries=150, sequenceid=37, filesize=11.7 K 2024-12-10T15:36:03,111 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=46.96 KB/48090 for 8a8b39bcfc5042b2f61256808771f62a in 894ms, sequenceid=37, compaction requested=false 2024-12-10T15:36:03,112 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:03,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:03,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-10T15:36:03,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:36:03,114 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8a8b39bcfc5042b2f61256808771f62a 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-10T15:36:03,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=A 2024-12-10T15:36:03,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:03,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=B 2024-12-10T15:36:03,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:03,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=C 2024-12-10T15:36:03,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:03,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-12-10T15:36:03,143 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/bfcb8760bf004bf19c2e0ef1098c76ed is 50, key is test_row_0/A:col10/1733844963113/Put/seqid=0 2024-12-10T15:36:03,147 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-10T15:36:03,147 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7510 sec 2024-12-10T15:36:03,169 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 2.7880 sec 2024-12-10T15:36:03,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741845_1021 (size=23705) 2024-12-10T15:36:03,309 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:03,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845023230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:03,326 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:03,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845023301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:03,332 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:03,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845023303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:03,333 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:03,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845023307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:03,334 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:03,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845023309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:03,418 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:03,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845023418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:03,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:03,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845023437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:03,440 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:03,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845023438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:03,441 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:03,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845023439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:03,447 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:03,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845023442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:03,586 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/bfcb8760bf004bf19c2e0ef1098c76ed 2024-12-10T15:36:03,626 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:03,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845023626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:03,632 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/0896aa30d6b644d0bd5476003903c8f1 is 50, key is test_row_0/B:col10/1733844963113/Put/seqid=0 2024-12-10T15:36:03,647 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:03,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845023644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:03,652 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:03,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845023648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:03,662 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:03,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845023659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:03,662 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:03,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845023661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:03,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741846_1022 (size=12001) 2024-12-10T15:36:03,683 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/0896aa30d6b644d0bd5476003903c8f1 2024-12-10T15:36:03,778 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/8d0c56f0e4e94467b1c9ae8703309df4 is 50, key is test_row_0/C:col10/1733844963113/Put/seqid=0 2024-12-10T15:36:03,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741847_1023 (size=12001) 2024-12-10T15:36:03,837 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/8d0c56f0e4e94467b1c9ae8703309df4 2024-12-10T15:36:03,856 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/bfcb8760bf004bf19c2e0ef1098c76ed as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/bfcb8760bf004bf19c2e0ef1098c76ed 2024-12-10T15:36:03,869 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/bfcb8760bf004bf19c2e0ef1098c76ed, entries=400, sequenceid=50, filesize=23.1 K 2024-12-10T15:36:03,871 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/0896aa30d6b644d0bd5476003903c8f1 as 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/0896aa30d6b644d0bd5476003903c8f1 2024-12-10T15:36:03,885 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/0896aa30d6b644d0bd5476003903c8f1, entries=150, sequenceid=50, filesize=11.7 K 2024-12-10T15:36:03,887 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/8d0c56f0e4e94467b1c9ae8703309df4 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/8d0c56f0e4e94467b1c9ae8703309df4 2024-12-10T15:36:03,915 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/8d0c56f0e4e94467b1c9ae8703309df4, entries=150, sequenceid=50, filesize=11.7 K 2024-12-10T15:36:03,923 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 8a8b39bcfc5042b2f61256808771f62a in 809ms, sequenceid=50, compaction requested=true 2024-12-10T15:36:03,923 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:03,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8a8b39bcfc5042b2f61256808771f62a:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:36:03,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:03,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8a8b39bcfc5042b2f61256808771f62a:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:36:03,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:03,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8a8b39bcfc5042b2f61256808771f62a:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:36:03,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:03,931 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:03,931 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:03,942 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:03,945 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 47707 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:03,948 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 8a8b39bcfc5042b2f61256808771f62a/B is initiating minor compaction (all files) 2024-12-10T15:36:03,948 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8a8b39bcfc5042b2f61256808771f62a/B in TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:03,948 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/23125e145e97451f8644260d1a3ac40f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/d385beb5cc0443d1b0c2dffbe92c1f39, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/0896aa30d6b644d0bd5476003903c8f1] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp, totalSize=35.2 K 2024-12-10T15:36:03,949 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 8a8b39bcfc5042b2f61256808771f62a/A is initiating minor compaction (all files) 2024-12-10T15:36:03,949 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8a8b39bcfc5042b2f61256808771f62a/A in TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 
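The "1 in ratio" wording above comes from the exploring compaction selection: a candidate set of store files is only kept when every file fits the size ratio relative to the rest of the set. A simplified Java sketch of that check, using the file sizes from this log and assuming the usual default of 1.2 for hbase.hstore.compaction.ratio (the value actually in effect for this test is an assumption):

    import java.util.List;

    // Hedged, simplified sketch of the "in ratio" test used when picking
    // compaction candidates: no single file may be larger than the combined
    // size of the other files in the selection times the ratio.
    final class CompactionRatioCheck {
      static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
          if (size > (total - size) * ratio) {
            return false;                    // one file dominates the selection
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Sizes taken from the log: B selected 3 x 12001 = 36003 bytes,
        // A selected 12001 + 12001 + 23705 = 47707 bytes.
        System.out.println(filesInRatio(List.of(12001L, 12001L, 12001L), 1.2)); // true
        System.out.println(filesInRatio(List.of(12001L, 12001L, 23705L), 1.2)); // true: 23705 <= 24002 * 1.2
      }
    }

Both selections pass the check, which is consistent with the log compacting all three files in each family.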
2024-12-10T15:36:03,949 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/c259330c349c4dac95a47ad661d2af66, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/215d7470eb4d4386a919b1e4de1bd1e5, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/bfcb8760bf004bf19c2e0ef1098c76ed] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp, totalSize=46.6 K 2024-12-10T15:36:03,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:36:03,956 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 23125e145e97451f8644260d1a3ac40f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1733844960417 2024-12-10T15:36:03,957 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8a8b39bcfc5042b2f61256808771f62a 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-10T15:36:03,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=A 2024-12-10T15:36:03,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:03,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=B 2024-12-10T15:36:03,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:03,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=C 2024-12-10T15:36:03,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:03,959 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting c259330c349c4dac95a47ad661d2af66, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1733844960417 2024-12-10T15:36:03,963 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 215d7470eb4d4386a919b1e4de1bd1e5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733844960706 2024-12-10T15:36:03,963 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting d385beb5cc0443d1b0c2dffbe92c1f39, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733844960706 2024-12-10T15:36:03,967 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting bfcb8760bf004bf19c2e0ef1098c76ed, keycount=400, bloomtype=ROW, size=23.1 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733844963047 2024-12-10T15:36:03,973 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 
0896aa30d6b644d0bd5476003903c8f1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733844963091 2024-12-10T15:36:04,009 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/7e9b103fb147492b801527e4794f93f0 is 50, key is test_row_0/A:col10/1733844963300/Put/seqid=0 2024-12-10T15:36:04,058 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8a8b39bcfc5042b2f61256808771f62a#B#compaction#10 average throughput is 0.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:04,059 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/4cd421036f114eeda8c43242954b9678 is 50, key is test_row_0/B:col10/1733844963113/Put/seqid=0 2024-12-10T15:36:04,060 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8a8b39bcfc5042b2f61256808771f62a#A#compaction#11 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:04,060 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:04,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845024012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:04,061 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/1514479717e546ec9e871cb3dc2e3f76 is 50, key is test_row_0/A:col10/1733844963113/Put/seqid=0 2024-12-10T15:36:04,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:04,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845024016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:04,066 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:04,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845024039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:04,066 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:04,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845024062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:04,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741848_1024 (size=14341) 2024-12-10T15:36:04,083 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/7e9b103fb147492b801527e4794f93f0 2024-12-10T15:36:04,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741849_1025 (size=12104) 2024-12-10T15:36:04,090 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:04,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845024068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:04,101 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/1514479717e546ec9e871cb3dc2e3f76 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/1514479717e546ec9e871cb3dc2e3f76 2024-12-10T15:36:04,125 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8a8b39bcfc5042b2f61256808771f62a/A of 8a8b39bcfc5042b2f61256808771f62a into 1514479717e546ec9e871cb3dc2e3f76(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:36:04,125 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:04,125 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a., storeName=8a8b39bcfc5042b2f61256808771f62a/A, priority=13, startTime=1733844963924; duration=0sec 2024-12-10T15:36:04,126 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:04,126 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8a8b39bcfc5042b2f61256808771f62a:A 2024-12-10T15:36:04,126 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:04,133 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:04,133 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 8a8b39bcfc5042b2f61256808771f62a/C is initiating minor compaction (all files) 2024-12-10T15:36:04,133 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8a8b39bcfc5042b2f61256808771f62a/C in TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:04,134 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/6a319f6f1c1544c58f26864732f1f9d4, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/adf65e92515a4f56898699def1547296, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/8d0c56f0e4e94467b1c9ae8703309df4] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp, totalSize=35.2 K 2024-12-10T15:36:04,134 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6a319f6f1c1544c58f26864732f1f9d4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1733844960417 2024-12-10T15:36:04,138 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/ae36353a9eec4e9d8c140cf07accbc8e is 50, key is test_row_0/B:col10/1733844963300/Put/seqid=0 2024-12-10T15:36:04,138 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting adf65e92515a4f56898699def1547296, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733844960706 2024-12-10T15:36:04,140 DEBUG 
[RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8d0c56f0e4e94467b1c9ae8703309df4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733844963091 2024-12-10T15:36:04,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741850_1026 (size=12104) 2024-12-10T15:36:04,163 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/4cd421036f114eeda8c43242954b9678 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/4cd421036f114eeda8c43242954b9678 2024-12-10T15:36:04,181 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:04,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845024176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:04,182 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:04,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845024176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:04,184 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8a8b39bcfc5042b2f61256808771f62a/B of 8a8b39bcfc5042b2f61256808771f62a into 4cd421036f114eeda8c43242954b9678(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:36:04,184 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:04,184 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a., storeName=8a8b39bcfc5042b2f61256808771f62a/B, priority=13, startTime=1733844963931; duration=0sec 2024-12-10T15:36:04,184 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:04,184 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8a8b39bcfc5042b2f61256808771f62a:B 2024-12-10T15:36:04,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741851_1027 (size=12001) 2024-12-10T15:36:04,195 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:04,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845024176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:04,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:04,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845024186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:04,199 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/ae36353a9eec4e9d8c140cf07accbc8e 2024-12-10T15:36:04,201 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8a8b39bcfc5042b2f61256808771f62a#C#compaction#13 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:04,202 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/5b92018f495b4d738f208cc8c93e60b0 is 50, key is test_row_0/C:col10/1733844963113/Put/seqid=0 2024-12-10T15:36:04,220 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:04,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845024204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:04,235 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/2353915cefea441483bb9bd5a03b4039 is 50, key is test_row_0/C:col10/1733844963300/Put/seqid=0 2024-12-10T15:36:04,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741852_1028 (size=12104) 2024-12-10T15:36:04,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741853_1029 (size=12001) 2024-12-10T15:36:04,274 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/5b92018f495b4d738f208cc8c93e60b0 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/5b92018f495b4d738f208cc8c93e60b0 
2024-12-10T15:36:04,290 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/2353915cefea441483bb9bd5a03b4039 2024-12-10T15:36:04,292 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8a8b39bcfc5042b2f61256808771f62a/C of 8a8b39bcfc5042b2f61256808771f62a into 5b92018f495b4d738f208cc8c93e60b0(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:36:04,292 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:04,292 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a., storeName=8a8b39bcfc5042b2f61256808771f62a/C, priority=13, startTime=1733844963931; duration=0sec 2024-12-10T15:36:04,292 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:04,292 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8a8b39bcfc5042b2f61256808771f62a:C 2024-12-10T15:36:04,314 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/7e9b103fb147492b801527e4794f93f0 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/7e9b103fb147492b801527e4794f93f0 2024-12-10T15:36:04,329 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/7e9b103fb147492b801527e4794f93f0, entries=200, sequenceid=74, filesize=14.0 K 2024-12-10T15:36:04,331 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/ae36353a9eec4e9d8c140cf07accbc8e as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/ae36353a9eec4e9d8c140cf07accbc8e 2024-12-10T15:36:04,359 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/ae36353a9eec4e9d8c140cf07accbc8e, entries=150, sequenceid=74, filesize=11.7 K 2024-12-10T15:36:04,362 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/2353915cefea441483bb9bd5a03b4039 as 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/2353915cefea441483bb9bd5a03b4039 2024-12-10T15:36:04,374 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/2353915cefea441483bb9bd5a03b4039, entries=150, sequenceid=74, filesize=11.7 K 2024-12-10T15:36:04,391 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 8a8b39bcfc5042b2f61256808771f62a in 434ms, sequenceid=74, compaction requested=false 2024-12-10T15:36:04,392 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:04,416 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8a8b39bcfc5042b2f61256808771f62a 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-10T15:36:04,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:36:04,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=A 2024-12-10T15:36:04,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:04,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=B 2024-12-10T15:36:04,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:04,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=C 2024-12-10T15:36:04,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:04,447 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/42055f6d57514beaab36fa87869d5c3e is 50, key is test_row_0/A:col10/1733844964407/Put/seqid=0 2024-12-10T15:36:04,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-10T15:36:04,494 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-12-10T15:36:04,501 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:36:04,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741854_1030 (size=12001) 2024-12-10T15:36:04,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-12-10T15:36:04,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-10T15:36:04,504 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:36:04,506 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:36:04,506 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:36:04,512 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:04,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845024496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:04,524 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:04,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845024508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:04,525 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:04,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845024509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:04,526 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:04,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845024520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:04,527 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:04,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845024521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:04,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-10T15:36:04,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:04,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845024620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:04,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:04,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845024628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:04,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:04,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845024632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:04,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:04,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845024628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:04,641 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:04,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845024636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:04,659 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:04,663 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-10T15:36:04,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:04,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:04,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:04,664 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:04,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:04,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:04,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-10T15:36:04,826 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:04,827 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-10T15:36:04,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:04,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:04,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:04,829 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:04,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:04,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:04,851 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:04,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845024843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:04,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:04,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845024845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:04,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:04,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:04,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845024845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:04,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845024845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:04,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:04,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845024854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:04,906 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/42055f6d57514beaab36fa87869d5c3e 2024-12-10T15:36:04,978 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/8e1241f531bf4cedb3149e626f1019d6 is 50, key is test_row_0/B:col10/1733844964407/Put/seqid=0 2024-12-10T15:36:04,995 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:04,996 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-10T15:36:04,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:04,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:04,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:04,999 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:05,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:05,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:05,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741855_1031 (size=12001) 2024-12-10T15:36:05,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-10T15:36:05,155 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:05,156 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-10T15:36:05,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:05,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:05,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 
2024-12-10T15:36:05,159 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:05,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:05,163 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:05,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:05,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845025156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:05,164 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:05,169 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:05,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845025158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:05,170 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:05,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845025159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:05,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845025156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:05,177 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:05,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845025174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:05,331 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:05,332 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-10T15:36:05,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:05,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:05,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:05,333 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:05,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:05,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:05,444 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/8e1241f531bf4cedb3149e626f1019d6 2024-12-10T15:36:05,472 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/113db005c9f74dceb1bcf77e86ee108c is 50, key is test_row_0/C:col10/1733844964407/Put/seqid=0 2024-12-10T15:36:05,502 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:05,503 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-10T15:36:05,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:05,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:05,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 
2024-12-10T15:36:05,504 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:05,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:05,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:05,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741856_1032 (size=12001) 2024-12-10T15:36:05,527 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/113db005c9f74dceb1bcf77e86ee108c 2024-12-10T15:36:05,561 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/42055f6d57514beaab36fa87869d5c3e as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/42055f6d57514beaab36fa87869d5c3e 2024-12-10T15:36:05,581 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/42055f6d57514beaab36fa87869d5c3e, entries=150, sequenceid=94, filesize=11.7 K 2024-12-10T15:36:05,584 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/8e1241f531bf4cedb3149e626f1019d6 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/8e1241f531bf4cedb3149e626f1019d6 2024-12-10T15:36:05,606 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/8e1241f531bf4cedb3149e626f1019d6, entries=150, sequenceid=94, filesize=11.7 K 2024-12-10T15:36:05,610 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/113db005c9f74dceb1bcf77e86ee108c as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/113db005c9f74dceb1bcf77e86ee108c 2024-12-10T15:36:05,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-10T15:36:05,636 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/113db005c9f74dceb1bcf77e86ee108c, entries=150, sequenceid=94, filesize=11.7 K 2024-12-10T15:36:05,638 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=120.76 KB/123660 for 8a8b39bcfc5042b2f61256808771f62a in 1222ms, sequenceid=94, compaction requested=true 2024-12-10T15:36:05,638 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:05,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8a8b39bcfc5042b2f61256808771f62a:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:36:05,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:05,639 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:05,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8a8b39bcfc5042b2f61256808771f62a:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:36:05,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:05,639 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:05,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8a8b39bcfc5042b2f61256808771f62a:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:36:05,639 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:05,649 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 
2024-12-10T15:36:05,649 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38446 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:05,649 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 8a8b39bcfc5042b2f61256808771f62a/B is initiating minor compaction (all files) 2024-12-10T15:36:05,649 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 8a8b39bcfc5042b2f61256808771f62a/A is initiating minor compaction (all files) 2024-12-10T15:36:05,649 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8a8b39bcfc5042b2f61256808771f62a/B in TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:05,649 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8a8b39bcfc5042b2f61256808771f62a/A in TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:05,649 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/4cd421036f114eeda8c43242954b9678, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/ae36353a9eec4e9d8c140cf07accbc8e, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/8e1241f531bf4cedb3149e626f1019d6] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp, totalSize=35.3 K 2024-12-10T15:36:05,649 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/1514479717e546ec9e871cb3dc2e3f76, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/7e9b103fb147492b801527e4794f93f0, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/42055f6d57514beaab36fa87869d5c3e] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp, totalSize=37.5 K 2024-12-10T15:36:05,656 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1514479717e546ec9e871cb3dc2e3f76, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733844963091 2024-12-10T15:36:05,656 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 4cd421036f114eeda8c43242954b9678, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733844963091 2024-12-10T15:36:05,657 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting ae36353a9eec4e9d8c140cf07accbc8e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=74, earliestPutTs=1733844963229 2024-12-10T15:36:05,658 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e9b103fb147492b801527e4794f93f0, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733844963229 2024-12-10T15:36:05,659 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e1241f531bf4cedb3149e626f1019d6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733844964006 2024-12-10T15:36:05,659 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 42055f6d57514beaab36fa87869d5c3e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733844964006 2024-12-10T15:36:05,665 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:05,667 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-10T15:36:05,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:05,667 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 8a8b39bcfc5042b2f61256808771f62a 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-10T15:36:05,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=A 2024-12-10T15:36:05,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:05,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=B 2024-12-10T15:36:05,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:05,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=C 2024-12-10T15:36:05,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:05,672 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 
as already flushing 2024-12-10T15:36:05,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:36:05,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/4cde373839f4420c9aa557463393c90f is 50, key is test_row_0/A:col10/1733844964507/Put/seqid=0 2024-12-10T15:36:05,696 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8a8b39bcfc5042b2f61256808771f62a#A#compaction#19 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:05,697 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/9ab13e04db2c4ee5993d50ae5182b87f is 50, key is test_row_0/A:col10/1733844964407/Put/seqid=0 2024-12-10T15:36:05,712 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8a8b39bcfc5042b2f61256808771f62a#B#compaction#20 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:05,713 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/ff76886239524a28a6757028e9107208 is 50, key is test_row_0/B:col10/1733844964407/Put/seqid=0 2024-12-10T15:36:05,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741857_1033 (size=12001) 2024-12-10T15:36:05,743 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/4cde373839f4420c9aa557463393c90f 2024-12-10T15:36:05,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741859_1035 (size=12207) 2024-12-10T15:36:05,751 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:05,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845025725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:05,753 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:05,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845025733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:05,754 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:05,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845025741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:05,756 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:05,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845025750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:05,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:05,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845025752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:05,766 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/ff76886239524a28a6757028e9107208 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/ff76886239524a28a6757028e9107208 2024-12-10T15:36:05,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741858_1034 (size=12207) 2024-12-10T15:36:05,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/f3b06b1cdc4b44b0ad109737958e1e1a is 50, key is test_row_0/B:col10/1733844964507/Put/seqid=0 2024-12-10T15:36:05,796 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8a8b39bcfc5042b2f61256808771f62a/B of 8a8b39bcfc5042b2f61256808771f62a into ff76886239524a28a6757028e9107208(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:36:05,796 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:05,796 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a., storeName=8a8b39bcfc5042b2f61256808771f62a/B, priority=13, startTime=1733844965639; duration=0sec 2024-12-10T15:36:05,796 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:05,796 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8a8b39bcfc5042b2f61256808771f62a:B 2024-12-10T15:36:05,796 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:05,802 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:05,802 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 8a8b39bcfc5042b2f61256808771f62a/C is initiating minor compaction (all files) 2024-12-10T15:36:05,802 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8a8b39bcfc5042b2f61256808771f62a/C in TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:05,802 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/5b92018f495b4d738f208cc8c93e60b0, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/2353915cefea441483bb9bd5a03b4039, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/113db005c9f74dceb1bcf77e86ee108c] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp, totalSize=35.3 K 2024-12-10T15:36:05,804 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 5b92018f495b4d738f208cc8c93e60b0, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733844963091 2024-12-10T15:36:05,805 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 2353915cefea441483bb9bd5a03b4039, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733844963229 2024-12-10T15:36:05,806 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 113db005c9f74dceb1bcf77e86ee108c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733844964006 2024-12-10T15:36:05,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is 
added to blk_1073741860_1036 (size=12001) 2024-12-10T15:36:05,823 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/f3b06b1cdc4b44b0ad109737958e1e1a 2024-12-10T15:36:05,834 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8a8b39bcfc5042b2f61256808771f62a#C#compaction#22 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:05,835 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/7e0a43ad2ecd4b6793bebb85cd4b548e is 50, key is test_row_0/C:col10/1733844964407/Put/seqid=0 2024-12-10T15:36:05,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/0596c9dbb82b4948b3ef0dc161600005 is 50, key is test_row_0/C:col10/1733844964507/Put/seqid=0 2024-12-10T15:36:05,869 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:05,869 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:05,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845025857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:05,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845025858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:05,870 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:05,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845025858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:05,870 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:05,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845025859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:05,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:05,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845025862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:05,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741861_1037 (size=12207) 2024-12-10T15:36:05,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741862_1038 (size=12001) 2024-12-10T15:36:05,916 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/0596c9dbb82b4948b3ef0dc161600005 2024-12-10T15:36:05,947 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/7e0a43ad2ecd4b6793bebb85cd4b548e as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/7e0a43ad2ecd4b6793bebb85cd4b548e 2024-12-10T15:36:05,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/4cde373839f4420c9aa557463393c90f as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/4cde373839f4420c9aa557463393c90f 2024-12-10T15:36:05,994 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/4cde373839f4420c9aa557463393c90f, entries=150, 
sequenceid=115, filesize=11.7 K 2024-12-10T15:36:06,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/f3b06b1cdc4b44b0ad109737958e1e1a as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/f3b06b1cdc4b44b0ad109737958e1e1a 2024-12-10T15:36:06,009 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8a8b39bcfc5042b2f61256808771f62a/C of 8a8b39bcfc5042b2f61256808771f62a into 7e0a43ad2ecd4b6793bebb85cd4b548e(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:36:06,009 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:06,009 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a., storeName=8a8b39bcfc5042b2f61256808771f62a/C, priority=13, startTime=1733844965639; duration=0sec 2024-12-10T15:36:06,009 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:06,009 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8a8b39bcfc5042b2f61256808771f62a:C 2024-12-10T15:36:06,010 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/f3b06b1cdc4b44b0ad109737958e1e1a, entries=150, sequenceid=115, filesize=11.7 K 2024-12-10T15:36:06,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/0596c9dbb82b4948b3ef0dc161600005 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/0596c9dbb82b4948b3ef0dc161600005 2024-12-10T15:36:06,027 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/0596c9dbb82b4948b3ef0dc161600005, entries=150, sequenceid=115, filesize=11.7 K 2024-12-10T15:36:06,030 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 8a8b39bcfc5042b2f61256808771f62a in 362ms, sequenceid=115, compaction requested=false 2024-12-10T15:36:06,030 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:06,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:06,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-10T15:36:06,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-12-10T15:36:06,072 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-12-10T15:36:06,073 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5450 sec 2024-12-10T15:36:06,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:36:06,095 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 1.5750 sec 2024-12-10T15:36:06,095 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8a8b39bcfc5042b2f61256808771f62a 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-10T15:36:06,108 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=A 2024-12-10T15:36:06,108 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:06,108 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=B 2024-12-10T15:36:06,108 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:06,108 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=C 2024-12-10T15:36:06,108 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:06,122 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/df83c5e2147c417390f5fc9d029a05bb is 50, key is test_row_0/A:col10/1733844966084/Put/seqid=0 2024-12-10T15:36:06,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741863_1039 (size=12101) 2024-12-10T15:36:06,175 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/df83c5e2147c417390f5fc9d029a05bb 2024-12-10T15:36:06,178 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:06,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845026148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:06,184 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/9ab13e04db2c4ee5993d50ae5182b87f as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/9ab13e04db2c4ee5993d50ae5182b87f 2024-12-10T15:36:06,185 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:06,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845026172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:06,185 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:06,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845026173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:06,187 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:06,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845026180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:06,190 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:06,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845026178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:06,199 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8a8b39bcfc5042b2f61256808771f62a/A of 8a8b39bcfc5042b2f61256808771f62a into 9ab13e04db2c4ee5993d50ae5182b87f(size=11.9 K), total size for store is 23.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:36:06,199 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:06,199 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a., storeName=8a8b39bcfc5042b2f61256808771f62a/A, priority=13, startTime=1733844965639; duration=0sec 2024-12-10T15:36:06,199 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:06,199 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8a8b39bcfc5042b2f61256808771f62a:A 2024-12-10T15:36:06,238 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/5ee1c9642f894d1a9fe94d46c09b4912 is 50, key is test_row_0/B:col10/1733844966084/Put/seqid=0 2024-12-10T15:36:06,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741864_1040 (size=12101) 2024-12-10T15:36:06,288 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:06,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845026285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:06,292 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:06,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845026288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:06,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:06,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845026288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:06,295 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:06,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845026292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:06,313 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:06,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845026309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:06,503 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:06,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845026501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:06,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:06,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845026504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:06,518 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:06,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845026507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:06,518 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:06,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845026508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:06,518 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:06,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845026518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:06,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-10T15:36:06,621 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-12-10T15:36:06,623 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:36:06,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-12-10T15:36:06,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-10T15:36:06,640 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:36:06,643 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:36:06,643 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:36:06,672 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=137 
(bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/5ee1c9642f894d1a9fe94d46c09b4912 2024-12-10T15:36:06,706 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/40bac7771a2f4546b2914353fa630382 is 50, key is test_row_0/C:col10/1733844966084/Put/seqid=0 2024-12-10T15:36:06,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-10T15:36:06,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741865_1041 (size=12101) 2024-12-10T15:36:06,763 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/40bac7771a2f4546b2914353fa630382 2024-12-10T15:36:06,785 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/df83c5e2147c417390f5fc9d029a05bb as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/df83c5e2147c417390f5fc9d029a05bb 2024-12-10T15:36:06,796 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/df83c5e2147c417390f5fc9d029a05bb, entries=150, sequenceid=137, filesize=11.8 K 2024-12-10T15:36:06,800 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/5ee1c9642f894d1a9fe94d46c09b4912 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/5ee1c9642f894d1a9fe94d46c09b4912 2024-12-10T15:36:06,802 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:06,805 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-10T15:36:06,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:06,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 
as already flushing 2024-12-10T15:36:06,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:06,806 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:06,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:06,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:06,813 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/5ee1c9642f894d1a9fe94d46c09b4912, entries=150, sequenceid=137, filesize=11.8 K 2024-12-10T15:36:06,815 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/40bac7771a2f4546b2914353fa630382 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/40bac7771a2f4546b2914353fa630382 2024-12-10T15:36:06,828 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/40bac7771a2f4546b2914353fa630382, entries=150, sequenceid=137, filesize=11.8 K 2024-12-10T15:36:06,830 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 8a8b39bcfc5042b2f61256808771f62a in 734ms, sequenceid=137, compaction requested=true 2024-12-10T15:36:06,830 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:06,830 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:06,833 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:06,833 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 8a8b39bcfc5042b2f61256808771f62a/A is initiating minor compaction (all files) 2024-12-10T15:36:06,833 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8a8b39bcfc5042b2f61256808771f62a/A in TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:06,833 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/9ab13e04db2c4ee5993d50ae5182b87f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/4cde373839f4420c9aa557463393c90f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/df83c5e2147c417390f5fc9d029a05bb] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp, totalSize=35.5 K 2024-12-10T15:36:06,834 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9ab13e04db2c4ee5993d50ae5182b87f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733844964006 2024-12-10T15:36:06,835 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4cde373839f4420c9aa557463393c90f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733844964496 2024-12-10T15:36:06,835 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting df83c5e2147c417390f5fc9d029a05bb, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733844965732 2024-12-10T15:36:06,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:36:06,857 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8a8b39bcfc5042b2f61256808771f62a:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:36:06,857 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:06,857 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:06,859 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:06,859 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 8a8b39bcfc5042b2f61256808771f62a/B is initiating minor compaction (all files) 2024-12-10T15:36:06,860 INFO 
[RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8a8b39bcfc5042b2f61256808771f62a/B in TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:06,860 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/ff76886239524a28a6757028e9107208, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/f3b06b1cdc4b44b0ad109737958e1e1a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/5ee1c9642f894d1a9fe94d46c09b4912] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp, totalSize=35.5 K 2024-12-10T15:36:06,864 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting ff76886239524a28a6757028e9107208, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733844964006 2024-12-10T15:36:06,865 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting f3b06b1cdc4b44b0ad109737958e1e1a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733844964496 2024-12-10T15:36:06,866 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ee1c9642f894d1a9fe94d46c09b4912, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733844965732 2024-12-10T15:36:06,888 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8a8b39bcfc5042b2f61256808771f62a#A#compaction#27 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:06,889 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/8dc4352d04144ef89966263faac542f1 is 50, key is test_row_0/A:col10/1733844966084/Put/seqid=0 2024-12-10T15:36:06,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8a8b39bcfc5042b2f61256808771f62a:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:36:06,891 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:06,891 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8a8b39bcfc5042b2f61256808771f62a:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:36:06,891 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:06,891 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8a8b39bcfc5042b2f61256808771f62a 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-10T15:36:06,892 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8a8b39bcfc5042b2f61256808771f62a#B#compaction#28 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:06,893 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/8180703620764a2bae5413ef6afd4562 is 50, key is test_row_0/B:col10/1733844966084/Put/seqid=0 2024-12-10T15:36:06,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=A 2024-12-10T15:36:06,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:06,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=B 2024-12-10T15:36:06,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:06,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=C 2024-12-10T15:36:06,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:06,932 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:06,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845026919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:06,933 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:06,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845026919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:06,934 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:06,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845026925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:06,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-10T15:36:06,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:06,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845026936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:06,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:06,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845026943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:06,962 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:06,967 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-10T15:36:06,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 
2024-12-10T15:36:06,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:06,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:06,968 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:07,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:07,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:07,033 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/8c5fbb4122c34975b4be2ef24d4e92de is 50, key is test_row_0/A:col10/1733844966870/Put/seqid=0 2024-12-10T15:36:07,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741867_1043 (size=12409) 2024-12-10T15:36:07,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741866_1042 (size=12409) 2024-12-10T15:36:07,044 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:07,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845027040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:07,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:07,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845027040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:07,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:07,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845027043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:07,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:07,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845027059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:07,067 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:07,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845027067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:07,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741868_1044 (size=12151) 2024-12-10T15:36:07,099 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/8c5fbb4122c34975b4be2ef24d4e92de 2024-12-10T15:36:07,121 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/dbc9fba4a8e9464dbc9f4acc5173eb3a is 50, key is test_row_0/B:col10/1733844966870/Put/seqid=0 2024-12-10T15:36:07,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741869_1045 (size=12151) 2024-12-10T15:36:07,187 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/dbc9fba4a8e9464dbc9f4acc5173eb3a 2024-12-10T15:36:07,194 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:07,195 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-10T15:36:07,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:07,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:07,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 
2024-12-10T15:36:07,196 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:07,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:07,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:07,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-10T15:36:07,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:07,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845027256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:07,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:07,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845027256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:07,259 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:07,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845027256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:07,264 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/0a18acf710b84d619e09f15fbf0e5edb is 50, key is test_row_0/C:col10/1733844966870/Put/seqid=0 2024-12-10T15:36:07,281 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:07,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845027279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:07,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:07,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845027280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:07,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741870_1046 (size=12151) 2024-12-10T15:36:07,354 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:07,357 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-10T15:36:07,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:07,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:07,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:07,362 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:07,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:07,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:07,461 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/8180703620764a2bae5413ef6afd4562 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/8180703620764a2bae5413ef6afd4562 2024-12-10T15:36:07,475 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/8dc4352d04144ef89966263faac542f1 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/8dc4352d04144ef89966263faac542f1 2024-12-10T15:36:07,476 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8a8b39bcfc5042b2f61256808771f62a/B of 8a8b39bcfc5042b2f61256808771f62a into 8180703620764a2bae5413ef6afd4562(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:36:07,476 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:07,476 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a., storeName=8a8b39bcfc5042b2f61256808771f62a/B, priority=13, startTime=1733844966857; duration=0sec 2024-12-10T15:36:07,476 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:07,476 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8a8b39bcfc5042b2f61256808771f62a:B 2024-12-10T15:36:07,476 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:07,483 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:07,483 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 8a8b39bcfc5042b2f61256808771f62a/C is initiating minor compaction (all files) 2024-12-10T15:36:07,483 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8a8b39bcfc5042b2f61256808771f62a/C in TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 
2024-12-10T15:36:07,483 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/7e0a43ad2ecd4b6793bebb85cd4b548e, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/0596c9dbb82b4948b3ef0dc161600005, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/40bac7771a2f4546b2914353fa630382] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp, totalSize=35.5 K 2024-12-10T15:36:07,486 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e0a43ad2ecd4b6793bebb85cd4b548e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733844964006 2024-12-10T15:36:07,491 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 0596c9dbb82b4948b3ef0dc161600005, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733844964496 2024-12-10T15:36:07,493 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8a8b39bcfc5042b2f61256808771f62a/A of 8a8b39bcfc5042b2f61256808771f62a into 8dc4352d04144ef89966263faac542f1(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:36:07,493 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:07,493 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a., storeName=8a8b39bcfc5042b2f61256808771f62a/A, priority=13, startTime=1733844966830; duration=0sec 2024-12-10T15:36:07,494 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:07,494 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8a8b39bcfc5042b2f61256808771f62a:A 2024-12-10T15:36:07,495 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 40bac7771a2f4546b2914353fa630382, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733844965732 2024-12-10T15:36:07,519 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:07,519 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-10T15:36:07,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 
2024-12-10T15:36:07,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:07,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:07,520 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:07,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:07,522 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8a8b39bcfc5042b2f61256808771f62a#C#compaction#32 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:07,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:07,522 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/cf88f156cd174bc99b9f64060567d66e is 50, key is test_row_0/C:col10/1733844966084/Put/seqid=0 2024-12-10T15:36:07,565 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:07,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845027560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:07,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:07,566 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:07,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845027564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:07,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845027561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:07,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741871_1047 (size=12409) 2024-12-10T15:36:07,590 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/cf88f156cd174bc99b9f64060567d66e as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/cf88f156cd174bc99b9f64060567d66e 2024-12-10T15:36:07,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:07,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845027595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:07,601 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:07,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845027595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:07,604 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8a8b39bcfc5042b2f61256808771f62a/C of 8a8b39bcfc5042b2f61256808771f62a into cf88f156cd174bc99b9f64060567d66e(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:36:07,604 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:07,604 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a., storeName=8a8b39bcfc5042b2f61256808771f62a/C, priority=13, startTime=1733844966891; duration=0sec 2024-12-10T15:36:07,605 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:07,605 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8a8b39bcfc5042b2f61256808771f62a:C 2024-12-10T15:36:07,673 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:07,674 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-10T15:36:07,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:07,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:07,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:07,674 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:07,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:07,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:07,709 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/0a18acf710b84d619e09f15fbf0e5edb 2024-12-10T15:36:07,721 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/8c5fbb4122c34975b4be2ef24d4e92de as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/8c5fbb4122c34975b4be2ef24d4e92de 2024-12-10T15:36:07,734 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/8c5fbb4122c34975b4be2ef24d4e92de, entries=150, sequenceid=165, filesize=11.9 K 2024-12-10T15:36:07,737 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/dbc9fba4a8e9464dbc9f4acc5173eb3a as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/dbc9fba4a8e9464dbc9f4acc5173eb3a 2024-12-10T15:36:07,746 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/dbc9fba4a8e9464dbc9f4acc5173eb3a, entries=150, sequenceid=165, filesize=11.9 K 2024-12-10T15:36:07,748 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/0a18acf710b84d619e09f15fbf0e5edb as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/0a18acf710b84d619e09f15fbf0e5edb 2024-12-10T15:36:07,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-10T15:36:07,759 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/0a18acf710b84d619e09f15fbf0e5edb, entries=150, sequenceid=165, filesize=11.9 K 2024-12-10T15:36:07,762 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 8a8b39bcfc5042b2f61256808771f62a in 871ms, sequenceid=165, compaction requested=false 2024-12-10T15:36:07,763 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:07,831 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:07,832 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-10T15:36:07,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:07,835 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 8a8b39bcfc5042b2f61256808771f62a 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-10T15:36:07,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=A 2024-12-10T15:36:07,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:07,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=B 2024-12-10T15:36:07,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:07,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=C 2024-12-10T15:36:07,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:07,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/b6e4d11daa974b5f97b7d4f33774107d is 50, key is test_row_1/A:col10/1733844966924/Put/seqid=0 2024-12-10T15:36:07,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741872_1048 (size=9757) 2024-12-10T15:36:08,079 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:08,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:36:08,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:08,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845028157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:08,167 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:08,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845028160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:08,168 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:08,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845028164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:08,173 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:08,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845028165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:08,175 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:08,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845028167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:08,271 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:08,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845028269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:08,281 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:08,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845028274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:08,284 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:08,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845028277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:08,285 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:08,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845028277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:08,286 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:08,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845028278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:08,347 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=178 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/b6e4d11daa974b5f97b7d4f33774107d 2024-12-10T15:36:08,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/66bad501625346e8aac1f898fc904eed is 50, key is test_row_1/B:col10/1733844966924/Put/seqid=0 2024-12-10T15:36:08,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741873_1049 (size=9757) 2024-12-10T15:36:08,479 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=178 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/66bad501625346e8aac1f898fc904eed 2024-12-10T15:36:08,480 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:08,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845028479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:08,492 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:08,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:08,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845028488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:08,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845028489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:08,492 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:08,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845028489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:08,493 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:08,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845028491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:08,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/e918baba87d14cddb5d621cafebaa397 is 50, key is test_row_1/C:col10/1733844966924/Put/seqid=0 2024-12-10T15:36:08,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741874_1050 (size=9757) 2024-12-10T15:36:08,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-10T15:36:08,787 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:08,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845028785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:08,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:08,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845028800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:08,802 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:08,802 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:08,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845028800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:08,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845028800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:08,806 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:08,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845028804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:09,004 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=178 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/e918baba87d14cddb5d621cafebaa397 2024-12-10T15:36:09,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/b6e4d11daa974b5f97b7d4f33774107d as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/b6e4d11daa974b5f97b7d4f33774107d 2024-12-10T15:36:09,048 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/b6e4d11daa974b5f97b7d4f33774107d, entries=100, sequenceid=178, filesize=9.5 K 2024-12-10T15:36:09,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/66bad501625346e8aac1f898fc904eed as 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/66bad501625346e8aac1f898fc904eed 2024-12-10T15:36:09,096 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/66bad501625346e8aac1f898fc904eed, entries=100, sequenceid=178, filesize=9.5 K 2024-12-10T15:36:09,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/e918baba87d14cddb5d621cafebaa397 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/e918baba87d14cddb5d621cafebaa397 2024-12-10T15:36:09,125 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/e918baba87d14cddb5d621cafebaa397, entries=100, sequenceid=178, filesize=9.5 K 2024-12-10T15:36:09,128 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=161.02 KB/164880 for 8a8b39bcfc5042b2f61256808771f62a in 1293ms, sequenceid=178, compaction requested=true 2024-12-10T15:36:09,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:09,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 
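The repeated RegionTooBusyException entries are HRegion.checkResources rejecting writes once the region's memstore passes its blocking limit (memstore flush size multiplied by hbase.hregion.memstore.block.multiplier); the 512.0 K limit reported here is far below what the default 128 MB flush size would produce, so the test is presumably running with a deliberately small flush size to exercise this back-pressure path until the in-flight flush drains the region. A minimal sketch of the settings involved, with illustrative values rather than the test's actual ones:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBackpressureSettings {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region flush trigger; the blocking limit in the log is this value
    // times the block multiplier (e.g. 128 KB * 4 = 512 KB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    // Client-side retry/backoff used while the server keeps answering writes with
    // RegionTooBusyException; puts eventually go through once the flush completes.
    conf.setInt("hbase.client.retries.number", 15);
    conf.setLong("hbase.client.pause", 100);

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
    System.out.println("memstore blocking limit = " + blockingLimit + " bytes");
  }
}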
2024-12-10T15:36:09,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-10T15:36:09,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-12-10T15:36:09,167 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-12-10T15:36:09,167 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5020 sec 2024-12-10T15:36:09,187 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 2.5520 sec 2024-12-10T15:36:09,298 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8a8b39bcfc5042b2f61256808771f62a 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-10T15:36:09,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=A 2024-12-10T15:36:09,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:09,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=B 2024-12-10T15:36:09,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:09,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=C 2024-12-10T15:36:09,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:09,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:36:09,319 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/99d2b0c7a6b74dbfbebc14d7c1c8d139 is 50, key is test_row_0/A:col10/1733844969295/Put/seqid=0 2024-12-10T15:36:09,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:09,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845029323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:09,342 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:09,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845029332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:09,343 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:09,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845029340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:09,341 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:09,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845029328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:09,345 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:09,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845029343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:09,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741875_1051 (size=14541) 2024-12-10T15:36:09,377 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/99d2b0c7a6b74dbfbebc14d7c1c8d139 2024-12-10T15:36:09,402 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/3002b2c23bf14c5fb08b7c3be7a85170 is 50, key is test_row_0/B:col10/1733844969295/Put/seqid=0 2024-12-10T15:36:09,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741876_1052 (size=12151) 2024-12-10T15:36:09,449 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/3002b2c23bf14c5fb08b7c3be7a85170 2024-12-10T15:36:09,454 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:09,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845029445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:09,455 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:09,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845029445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:09,459 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:09,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845029447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:09,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:09,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845029449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:09,461 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:09,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845029450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:09,486 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/e55c6e0436fb4637b9e6ec9855d60ee4 is 50, key is test_row_0/C:col10/1733844969295/Put/seqid=0 2024-12-10T15:36:09,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741877_1053 (size=12151) 2024-12-10T15:36:09,657 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:09,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845029656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:09,658 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:09,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845029657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:09,668 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:09,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845029663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:09,669 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:09,671 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:09,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845029664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:09,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845029664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:09,948 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=206 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/e55c6e0436fb4637b9e6ec9855d60ee4 2024-12-10T15:36:09,966 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:09,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845029961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:09,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:09,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845029969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:09,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:09,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845029971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:09,978 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:09,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845029977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:09,980 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/99d2b0c7a6b74dbfbebc14d7c1c8d139 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/99d2b0c7a6b74dbfbebc14d7c1c8d139 2024-12-10T15:36:09,987 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:09,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845029982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:09,990 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/99d2b0c7a6b74dbfbebc14d7c1c8d139, entries=200, sequenceid=206, filesize=14.2 K 2024-12-10T15:36:09,992 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/3002b2c23bf14c5fb08b7c3be7a85170 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/3002b2c23bf14c5fb08b7c3be7a85170 2024-12-10T15:36:10,001 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/3002b2c23bf14c5fb08b7c3be7a85170, entries=150, sequenceid=206, filesize=11.9 K 2024-12-10T15:36:10,002 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/e55c6e0436fb4637b9e6ec9855d60ee4 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/e55c6e0436fb4637b9e6ec9855d60ee4 2024-12-10T15:36:10,012 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/e55c6e0436fb4637b9e6ec9855d60ee4, entries=150, sequenceid=206, filesize=11.9 K 2024-12-10T15:36:10,016 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=46.96 KB/48090 for 8a8b39bcfc5042b2f61256808771f62a in 718ms, sequenceid=206, compaction requested=true 2024-12-10T15:36:10,016 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:10,016 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:36:10,017 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8a8b39bcfc5042b2f61256808771f62a:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:36:10,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:10,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8a8b39bcfc5042b2f61256808771f62a:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:36:10,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:10,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8a8b39bcfc5042b2f61256808771f62a:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:36:10,017 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:10,017 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:36:10,020 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48858 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:36:10,020 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 8a8b39bcfc5042b2f61256808771f62a/A is initiating minor compaction (all files) 2024-12-10T15:36:10,020 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8a8b39bcfc5042b2f61256808771f62a/A in TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 
2024-12-10T15:36:10,020 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/8dc4352d04144ef89966263faac542f1, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/8c5fbb4122c34975b4be2ef24d4e92de, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/b6e4d11daa974b5f97b7d4f33774107d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/99d2b0c7a6b74dbfbebc14d7c1c8d139] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp, totalSize=47.7 K 2024-12-10T15:36:10,021 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46468 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:36:10,021 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 8a8b39bcfc5042b2f61256808771f62a/B is initiating minor compaction (all files) 2024-12-10T15:36:10,021 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8a8b39bcfc5042b2f61256808771f62a/B in TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:10,021 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/8180703620764a2bae5413ef6afd4562, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/dbc9fba4a8e9464dbc9f4acc5173eb3a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/66bad501625346e8aac1f898fc904eed, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/3002b2c23bf14c5fb08b7c3be7a85170] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp, totalSize=45.4 K 2024-12-10T15:36:10,023 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8dc4352d04144ef89966263faac542f1, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733844965732 2024-12-10T15:36:10,024 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 8180703620764a2bae5413ef6afd4562, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733844965732 2024-12-10T15:36:10,025 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting dbc9fba4a8e9464dbc9f4acc5173eb3a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=165, 
earliestPutTs=1733844966870 2024-12-10T15:36:10,025 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8c5fbb4122c34975b4be2ef24d4e92de, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1733844966870 2024-12-10T15:36:10,026 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 66bad501625346e8aac1f898fc904eed, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1733844966924 2024-12-10T15:36:10,026 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting b6e4d11daa974b5f97b7d4f33774107d, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1733844966924 2024-12-10T15:36:10,026 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 3002b2c23bf14c5fb08b7c3be7a85170, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1733844968150 2024-12-10T15:36:10,027 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 99d2b0c7a6b74dbfbebc14d7c1c8d139, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1733844968150 2024-12-10T15:36:10,069 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8a8b39bcfc5042b2f61256808771f62a#A#compaction#39 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:10,070 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/ba9f8e2fcd6b4e06bfcb212d1048dd9a is 50, key is test_row_0/A:col10/1733844969295/Put/seqid=0 2024-12-10T15:36:10,085 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8a8b39bcfc5042b2f61256808771f62a#B#compaction#40 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:10,086 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/980d95d1de904d00909672a2a45e569f is 50, key is test_row_0/B:col10/1733844969295/Put/seqid=0 2024-12-10T15:36:10,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741878_1054 (size=12595) 2024-12-10T15:36:10,134 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/ba9f8e2fcd6b4e06bfcb212d1048dd9a as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/ba9f8e2fcd6b4e06bfcb212d1048dd9a 2024-12-10T15:36:10,144 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8a8b39bcfc5042b2f61256808771f62a/A of 8a8b39bcfc5042b2f61256808771f62a into ba9f8e2fcd6b4e06bfcb212d1048dd9a(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:36:10,144 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:10,145 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a., storeName=8a8b39bcfc5042b2f61256808771f62a/A, priority=12, startTime=1733844970016; duration=0sec 2024-12-10T15:36:10,145 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:10,145 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8a8b39bcfc5042b2f61256808771f62a:A 2024-12-10T15:36:10,145 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:36:10,148 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46468 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:36:10,148 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 8a8b39bcfc5042b2f61256808771f62a/C is initiating minor compaction (all files) 2024-12-10T15:36:10,148 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8a8b39bcfc5042b2f61256808771f62a/C in TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 
2024-12-10T15:36:10,149 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/cf88f156cd174bc99b9f64060567d66e, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/0a18acf710b84d619e09f15fbf0e5edb, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/e918baba87d14cddb5d621cafebaa397, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/e55c6e0436fb4637b9e6ec9855d60ee4] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp, totalSize=45.4 K 2024-12-10T15:36:10,149 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf88f156cd174bc99b9f64060567d66e, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733844965732 2024-12-10T15:36:10,150 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a18acf710b84d619e09f15fbf0e5edb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1733844966870 2024-12-10T15:36:10,150 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting e918baba87d14cddb5d621cafebaa397, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1733844966924 2024-12-10T15:36:10,151 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting e55c6e0436fb4637b9e6ec9855d60ee4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1733844968150 2024-12-10T15:36:10,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741879_1055 (size=12595) 2024-12-10T15:36:10,163 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/980d95d1de904d00909672a2a45e569f as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/980d95d1de904d00909672a2a45e569f 2024-12-10T15:36:10,179 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8a8b39bcfc5042b2f61256808771f62a/B of 8a8b39bcfc5042b2f61256808771f62a into 980d95d1de904d00909672a2a45e569f(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:36:10,179 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:10,179 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a., storeName=8a8b39bcfc5042b2f61256808771f62a/B, priority=12, startTime=1733844970017; duration=0sec 2024-12-10T15:36:10,179 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:10,179 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8a8b39bcfc5042b2f61256808771f62a:B 2024-12-10T15:36:10,186 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8a8b39bcfc5042b2f61256808771f62a#C#compaction#41 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:10,186 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/54acb781a713458d8e13839406e7743b is 50, key is test_row_0/C:col10/1733844969295/Put/seqid=0 2024-12-10T15:36:10,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741880_1056 (size=12595) 2024-12-10T15:36:10,483 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8a8b39bcfc5042b2f61256808771f62a 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T15:36:10,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=A 2024-12-10T15:36:10,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:10,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=B 2024-12-10T15:36:10,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:10,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=C 2024-12-10T15:36:10,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:10,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:36:10,517 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/aaa1dbc19d78443aa4045b15a5ad174b is 50, key is test_row_0/A:col10/1733844969324/Put/seqid=0 2024-12-10T15:36:10,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741881_1057 (size=14541) 
2024-12-10T15:36:10,604 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:10,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845030584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:10,605 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:10,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845030591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:10,614 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:10,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845030608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:10,622 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:10,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845030607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:10,623 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:10,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845030609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:10,649 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/54acb781a713458d8e13839406e7743b as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/54acb781a713458d8e13839406e7743b 2024-12-10T15:36:10,659 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8a8b39bcfc5042b2f61256808771f62a/C of 8a8b39bcfc5042b2f61256808771f62a into 54acb781a713458d8e13839406e7743b(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:36:10,659 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:10,659 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a., storeName=8a8b39bcfc5042b2f61256808771f62a/C, priority=12, startTime=1733844970017; duration=0sec 2024-12-10T15:36:10,659 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:10,660 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8a8b39bcfc5042b2f61256808771f62a:C 2024-12-10T15:36:10,711 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:10,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845030708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:10,721 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:10,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845030716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:10,728 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:10,731 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:10,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845030728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:10,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845030724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:10,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:10,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845030740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:10,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-10T15:36:10,769 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-12-10T15:36:10,776 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:36:10,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-12-10T15:36:10,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-10T15:36:10,792 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:36:10,800 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:36:10,800 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:36:10,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=18 2024-12-10T15:36:10,929 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:10,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845030922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:10,932 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:10,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845030928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:10,943 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:10,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845030935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:10,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:10,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845030943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:10,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:10,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845030946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:10,954 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:10,956 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-10T15:36:10,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:10,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:10,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:10,956 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
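The flush activity around procedures pid=18/19 is driven by an explicit client flush request (the "Client=jenkins ... flush TestAcidGuarantees" entry above). A minimal sketch, assuming a reachable cluster configuration, of the Admin call behind it: Admin.flush() starts a FlushTableProcedure on the master, and the FlushRegionCallable it dispatches to the region server reports "Unable to complete flush ... as already flushing" whenever the region is mid-flush, after which the procedure is retried until the region-level flush finishes (as the repeated pid=19 attempts below show).

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequestSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Blocks until the master-side flush procedure completes, which is what the
                // "Operation: FLUSH, Table Name: default:TestAcidGuarantees ... completed"
                // entry above records for the earlier request (procId: 16).
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }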
2024-12-10T15:36:10,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:10,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:10,983 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/aaa1dbc19d78443aa4045b15a5ad174b 2024-12-10T15:36:10,996 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/a9dc47a581aa412b9c7715c221e54916 is 50, key is test_row_0/B:col10/1733844969324/Put/seqid=0 2024-12-10T15:36:11,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741882_1058 (size=12151) 2024-12-10T15:36:11,045 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/a9dc47a581aa412b9c7715c221e54916 2024-12-10T15:36:11,069 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/e7515db364b641b68c67805b2745d46b is 50, key is test_row_0/C:col10/1733844969324/Put/seqid=0 2024-12-10T15:36:11,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-10T15:36:11,110 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:11,111 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-10T15:36:11,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:11,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:11,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 
2024-12-10T15:36:11,111 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:11,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:11,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:11,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741883_1059 (size=12151) 2024-12-10T15:36:11,127 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/e7515db364b641b68c67805b2745d46b 2024-12-10T15:36:11,138 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/aaa1dbc19d78443aa4045b15a5ad174b as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/aaa1dbc19d78443aa4045b15a5ad174b 2024-12-10T15:36:11,149 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/aaa1dbc19d78443aa4045b15a5ad174b, entries=200, sequenceid=219, filesize=14.2 K 2024-12-10T15:36:11,153 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/a9dc47a581aa412b9c7715c221e54916 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/a9dc47a581aa412b9c7715c221e54916 2024-12-10T15:36:11,165 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/a9dc47a581aa412b9c7715c221e54916, entries=150, sequenceid=219, filesize=11.9 K 2024-12-10T15:36:11,167 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/e7515db364b641b68c67805b2745d46b as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/e7515db364b641b68c67805b2745d46b 2024-12-10T15:36:11,179 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/e7515db364b641b68c67805b2745d46b, entries=150, sequenceid=219, filesize=11.9 K 2024-12-10T15:36:11,182 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=161.02 KB/164880 for 8a8b39bcfc5042b2f61256808771f62a in 699ms, sequenceid=219, compaction requested=false 2024-12-10T15:36:11,182 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:11,243 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8a8b39bcfc5042b2f61256808771f62a 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-10T15:36:11,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=A 2024-12-10T15:36:11,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:11,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=B 2024-12-10T15:36:11,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:11,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=C 2024-12-10T15:36:11,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:11,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:36:11,271 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:11,272 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/b2a51dadbe19456d80c2a3ac042b768c is 50, key is test_row_0/A:col10/1733844971237/Put/seqid=0 2024-12-10T15:36:11,273 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-10T15:36:11,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:11,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:11,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:11,274 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:11,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
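The recurring "Over memstore limit=512.0 K" rejections come from HRegion.checkResources(): once the region's memstore grows past its blocking size, new mutations are refused with RegionTooBusyException until a flush shrinks it again. A sketch of the two settings that combine into that blocking size, assuming it is derived as hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier; the 128 KB / 4 values below are illustrative guesses that happen to reproduce the 512 K figure in the log, not the test's actual configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Illustrative values only: 128 KB flush size * multiplier 4 = 512 KB, matching
            // "Over memstore limit=512.0 K" in the log above.
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024L);
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
                    * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("writes block above " + blocking + " bytes per region memstore");
        }
    }

On the client side, the default HBase client treats RegionTooBusyException as retryable, so callers typically observe added latency rather than hard failures unless the retry budget is exhausted.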
2024-12-10T15:36:11,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:11,280 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:11,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845031263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:11,279 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:11,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845031262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:11,287 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:11,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845031279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:11,288 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:11,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845031281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:11,288 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:11,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845031282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:11,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741884_1060 (size=12151) 2024-12-10T15:36:11,320 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/b2a51dadbe19456d80c2a3ac042b768c 2024-12-10T15:36:11,360 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/b9b806444c8e4829bd3633820c3f3ea5 is 50, key is test_row_0/B:col10/1733844971237/Put/seqid=0 2024-12-10T15:36:11,392 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:11,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845031388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:11,395 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:11,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-10T15:36:11,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845031389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:11,397 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:11,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845031394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:11,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:11,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845031394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:11,408 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:11,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845031403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:11,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741885_1061 (size=12151) 2024-12-10T15:36:11,431 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:11,432 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/b9b806444c8e4829bd3633820c3f3ea5 2024-12-10T15:36:11,436 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-10T15:36:11,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:11,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:11,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 
2024-12-10T15:36:11,437 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:11,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:11,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:11,466 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/69bc187f77604f819b958c1916b9eb98 is 50, key is test_row_0/C:col10/1733844971237/Put/seqid=0 2024-12-10T15:36:11,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741886_1062 (size=12151) 2024-12-10T15:36:11,514 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/69bc187f77604f819b958c1916b9eb98 2024-12-10T15:36:11,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/b2a51dadbe19456d80c2a3ac042b768c as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/b2a51dadbe19456d80c2a3ac042b768c 2024-12-10T15:36:11,566 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/b2a51dadbe19456d80c2a3ac042b768c, entries=150, sequenceid=248, filesize=11.9 K 2024-12-10T15:36:11,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/b9b806444c8e4829bd3633820c3f3ea5 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/b9b806444c8e4829bd3633820c3f3ea5 2024-12-10T15:36:11,598 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:11,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845031596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:11,610 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:11,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845031608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:11,611 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:11,612 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:11,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845031608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:11,614 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-10T15:36:11,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 
2024-12-10T15:36:11,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:11,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:11,615 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:11,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:11,615 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:11,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845031608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:11,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:11,616 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/b9b806444c8e4829bd3633820c3f3ea5, entries=150, sequenceid=248, filesize=11.9 K 2024-12-10T15:36:11,627 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:11,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845031617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:11,635 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/69bc187f77604f819b958c1916b9eb98 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/69bc187f77604f819b958c1916b9eb98 2024-12-10T15:36:11,686 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/69bc187f77604f819b958c1916b9eb98, entries=150, sequenceid=248, filesize=11.9 K 2024-12-10T15:36:11,692 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for 8a8b39bcfc5042b2f61256808771f62a in 448ms, sequenceid=248, compaction requested=true 2024-12-10T15:36:11,693 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:11,693 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:11,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8a8b39bcfc5042b2f61256808771f62a:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:36:11,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:11,693 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:11,697 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39287 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:11,697 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 8a8b39bcfc5042b2f61256808771f62a/A is initiating minor compaction (all files) 2024-12-10T15:36:11,697 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): 
Starting compaction of 8a8b39bcfc5042b2f61256808771f62a/A in TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:11,697 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/ba9f8e2fcd6b4e06bfcb212d1048dd9a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/aaa1dbc19d78443aa4045b15a5ad174b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/b2a51dadbe19456d80c2a3ac042b768c] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp, totalSize=38.4 K 2024-12-10T15:36:11,697 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:11,698 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 8a8b39bcfc5042b2f61256808771f62a/B is initiating minor compaction (all files) 2024-12-10T15:36:11,698 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8a8b39bcfc5042b2f61256808771f62a/B in TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:11,698 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/980d95d1de904d00909672a2a45e569f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/a9dc47a581aa412b9c7715c221e54916, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/b9b806444c8e4829bd3633820c3f3ea5] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp, totalSize=36.0 K 2024-12-10T15:36:11,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8a8b39bcfc5042b2f61256808771f62a:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:36:11,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:11,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8a8b39bcfc5042b2f61256808771f62a:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:36:11,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:11,699 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 980d95d1de904d00909672a2a45e569f, keycount=150, 
bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1733844968150 2024-12-10T15:36:11,701 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting ba9f8e2fcd6b4e06bfcb212d1048dd9a, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1733844968150 2024-12-10T15:36:11,711 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting a9dc47a581aa412b9c7715c221e54916, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1733844969324 2024-12-10T15:36:11,711 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting aaa1dbc19d78443aa4045b15a5ad174b, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1733844969310 2024-12-10T15:36:11,712 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting b9b806444c8e4829bd3633820c3f3ea5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733844970576 2024-12-10T15:36:11,712 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting b2a51dadbe19456d80c2a3ac042b768c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733844970576 2024-12-10T15:36:11,732 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8a8b39bcfc5042b2f61256808771f62a#A#compaction#48 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:11,733 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/55c0f9122e4143658bf3aba0351df8f3 is 50, key is test_row_0/A:col10/1733844971237/Put/seqid=0 2024-12-10T15:36:11,738 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8a8b39bcfc5042b2f61256808771f62a#B#compaction#49 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:11,739 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/435039e07c0a43999fc38a461dc30736 is 50, key is test_row_0/B:col10/1733844971237/Put/seqid=0 2024-12-10T15:36:11,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741887_1063 (size=12697) 2024-12-10T15:36:11,770 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:11,771 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-10T15:36:11,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:11,771 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 8a8b39bcfc5042b2f61256808771f62a 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-10T15:36:11,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=A 2024-12-10T15:36:11,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:11,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=B 2024-12-10T15:36:11,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:11,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=C 2024-12-10T15:36:11,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:11,776 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/55c0f9122e4143658bf3aba0351df8f3 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/55c0f9122e4143658bf3aba0351df8f3 2024-12-10T15:36:11,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741888_1064 (size=12697) 2024-12-10T15:36:11,788 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] 
regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8a8b39bcfc5042b2f61256808771f62a/A of 8a8b39bcfc5042b2f61256808771f62a into 55c0f9122e4143658bf3aba0351df8f3(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:36:11,788 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:11,788 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a., storeName=8a8b39bcfc5042b2f61256808771f62a/A, priority=13, startTime=1733844971693; duration=0sec 2024-12-10T15:36:11,788 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:11,789 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8a8b39bcfc5042b2f61256808771f62a:A 2024-12-10T15:36:11,789 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:11,790 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:11,791 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 8a8b39bcfc5042b2f61256808771f62a/C is initiating minor compaction (all files) 2024-12-10T15:36:11,791 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8a8b39bcfc5042b2f61256808771f62a/C in TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 
2024-12-10T15:36:11,791 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/54acb781a713458d8e13839406e7743b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/e7515db364b641b68c67805b2745d46b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/69bc187f77604f819b958c1916b9eb98] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp, totalSize=36.0 K 2024-12-10T15:36:11,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/6df7be84f5be49ee924a1e0ec89ec64b is 50, key is test_row_0/A:col10/1733844971267/Put/seqid=0 2024-12-10T15:36:11,799 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 54acb781a713458d8e13839406e7743b, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=206, earliestPutTs=1733844968150 2024-12-10T15:36:11,800 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/435039e07c0a43999fc38a461dc30736 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/435039e07c0a43999fc38a461dc30736 2024-12-10T15:36:11,800 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting e7515db364b641b68c67805b2745d46b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1733844969324 2024-12-10T15:36:11,800 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 69bc187f77604f819b958c1916b9eb98, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733844970576 2024-12-10T15:36:11,819 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8a8b39bcfc5042b2f61256808771f62a/B of 8a8b39bcfc5042b2f61256808771f62a into 435039e07c0a43999fc38a461dc30736(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:36:11,819 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:11,819 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a., storeName=8a8b39bcfc5042b2f61256808771f62a/B, priority=13, startTime=1733844971693; duration=0sec 2024-12-10T15:36:11,820 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:11,820 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8a8b39bcfc5042b2f61256808771f62a:B 2024-12-10T15:36:11,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741889_1065 (size=9757) 2024-12-10T15:36:11,825 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=257 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/6df7be84f5be49ee924a1e0ec89ec64b 2024-12-10T15:36:11,832 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8a8b39bcfc5042b2f61256808771f62a#C#compaction#51 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:11,833 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/bcd4abac354548c4be685e635550f2ab is 50, key is test_row_0/C:col10/1733844971237/Put/seqid=0 2024-12-10T15:36:11,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/fd75d6a258ee4bcb905d9781bea9fe61 is 50, key is test_row_0/B:col10/1733844971267/Put/seqid=0 2024-12-10T15:36:11,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741890_1066 (size=12697) 2024-12-10T15:36:11,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-10T15:36:11,915 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/bcd4abac354548c4be685e635550f2ab as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/bcd4abac354548c4be685e635550f2ab 2024-12-10T15:36:11,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741891_1067 (size=9757) 2024-12-10T15:36:11,928 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:11,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:36:11,942 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8a8b39bcfc5042b2f61256808771f62a/C of 8a8b39bcfc5042b2f61256808771f62a into bcd4abac354548c4be685e635550f2ab(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:36:11,943 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:11,943 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a., storeName=8a8b39bcfc5042b2f61256808771f62a/C, priority=13, startTime=1733844971698; duration=0sec 2024-12-10T15:36:11,943 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:11,943 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8a8b39bcfc5042b2f61256808771f62a:C 2024-12-10T15:36:12,041 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:12,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845032035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:12,046 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:12,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845032039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:12,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:12,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845032044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:12,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:12,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845032046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:12,065 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:12,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845032061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:12,160 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:12,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845032158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:12,163 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:12,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845032162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:12,169 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:12,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845032166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:12,171 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:12,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845032169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:12,175 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:12,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845032171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:12,360 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=257 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/fd75d6a258ee4bcb905d9781bea9fe61 2024-12-10T15:36:12,373 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:12,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845032370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:12,374 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:12,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845032371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:12,379 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:12,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845032378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:12,386 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:12,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845032381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:12,388 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:12,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845032387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:12,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/1a300140b9794e87a91a0ea289ef81d8 is 50, key is test_row_0/C:col10/1733844971267/Put/seqid=0 2024-12-10T15:36:12,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741892_1068 (size=9757) 2024-12-10T15:36:12,480 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=257 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/1a300140b9794e87a91a0ea289ef81d8 2024-12-10T15:36:12,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/6df7be84f5be49ee924a1e0ec89ec64b as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/6df7be84f5be49ee924a1e0ec89ec64b 2024-12-10T15:36:12,521 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/6df7be84f5be49ee924a1e0ec89ec64b, entries=100, sequenceid=257, filesize=9.5 K 2024-12-10T15:36:12,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/fd75d6a258ee4bcb905d9781bea9fe61 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/fd75d6a258ee4bcb905d9781bea9fe61 2024-12-10T15:36:12,539 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/fd75d6a258ee4bcb905d9781bea9fe61, entries=100, sequenceid=257, filesize=9.5 K 2024-12-10T15:36:12,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/1a300140b9794e87a91a0ea289ef81d8 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/1a300140b9794e87a91a0ea289ef81d8 2024-12-10T15:36:12,547 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/1a300140b9794e87a91a0ea289ef81d8, entries=100, sequenceid=257, filesize=9.5 K 2024-12-10T15:36:12,548 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=174.43 KB/178620 for 8a8b39bcfc5042b2f61256808771f62a in 777ms, sequenceid=257, compaction requested=false 2024-12-10T15:36:12,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:12,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 
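The repeated RegionTooBusyException warnings around this flush show Mutate calls being rejected while the region sits over its 512.0 K memstore limit; once the flush above completes (~40.25 KB written at sequenceid=257), writes can drain again. The sketch below shows one hypothetical caller-side reaction, retrying the put with backoff. The HBase client API calls used are standard, but the retry count, sleep values, and cell value are illustrative assumptions (the client library itself also retries such exceptions internally).

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical caller-side handling of the RegionTooBusyException seen above: retry the
// put with backoff instead of failing immediately. Retry count, sleep times, and the
// cell value are assumptions for illustration, not values taken from this test run.
public final class BusyRegionRetrySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            putWithRetry(table, put, 5, 200L);
        }
    }

    static void putWithRetry(Table table, Put put, int maxAttempts, long backoffMs)
            throws IOException, InterruptedException {
        for (int attempt = 1; ; attempt++) {
            try {
                table.put(put);
                return;
            } catch (RegionTooBusyException e) {
                // The region is over its memstore limit; give the flush a chance to drain it.
                if (attempt >= maxAttempts) {
                    throw e;
                }
                Thread.sleep(backoffMs * attempt);
            }
        }
    }
}
```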
2024-12-10T15:36:12,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-10T15:36:12,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-10T15:36:12,561 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-12-10T15:36:12,561 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7570 sec 2024-12-10T15:36:12,588 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 1.7950 sec 2024-12-10T15:36:12,689 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8a8b39bcfc5042b2f61256808771f62a 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-12-10T15:36:12,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=A 2024-12-10T15:36:12,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:12,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=B 2024-12-10T15:36:12,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:12,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=C 2024-12-10T15:36:12,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:12,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:36:12,709 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/25b31a0525ea4bc692380663230da6ad is 50, key is test_row_0/A:col10/1733844972031/Put/seqid=0 2024-12-10T15:36:12,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741893_1069 (size=14741) 2024-12-10T15:36:12,760 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:12,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845032694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:12,770 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:12,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845032764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:12,771 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:12,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845032766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:12,780 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:12,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:12,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845032764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:12,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845032775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:12,866 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:12,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845032862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:12,880 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:12,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845032875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:12,885 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:12,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845032882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:12,886 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:12,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845032882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:12,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-10T15:36:12,900 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-12-10T15:36:12,903 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:12,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845032899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:12,924 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:36:12,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-12-10T15:36:12,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-10T15:36:12,927 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:36:12,930 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:36:12,930 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:36:13,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-10T15:36:13,074 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:13,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845033072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:13,087 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:13,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845033084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:13,095 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:13,097 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:13,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845033094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:13,098 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:13,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845033095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:13,099 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-10T15:36:13,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:13,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:13,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:13,100 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:13,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:13,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:13,124 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:13,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845033123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:13,155 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/25b31a0525ea4bc692380663230da6ad 2024-12-10T15:36:13,201 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/5769f6613ca14cf69ea2977854d43cc7 is 50, key is test_row_0/B:col10/1733844972031/Put/seqid=0 2024-12-10T15:36:13,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-10T15:36:13,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741894_1070 (size=12301) 2024-12-10T15:36:13,256 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:13,259 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 
2024-12-10T15:36:13,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:13,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:13,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:13,260 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:13,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:13,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:13,383 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:13,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845033380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:13,401 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:13,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845033392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:13,408 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:13,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845033404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:13,409 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:13,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845033408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:13,423 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:13,425 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-10T15:36:13,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:13,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:13,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:13,426 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:13,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:13,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:13,448 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:13,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845033447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:13,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-10T15:36:13,589 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:13,593 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-10T15:36:13,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:13,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:13,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 
2024-12-10T15:36:13,594 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:13,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:13,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:13,651 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/5769f6613ca14cf69ea2977854d43cc7 2024-12-10T15:36:13,685 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/07b53fd4094d48a7b365f3dde238a8bd is 50, key is test_row_0/C:col10/1733844972031/Put/seqid=0 2024-12-10T15:36:13,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741895_1071 (size=12301) 2024-12-10T15:36:13,756 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:13,760 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-10T15:36:13,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:13,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 
as already flushing 2024-12-10T15:36:13,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:13,760 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:13,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:13,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:13,901 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:13,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845033900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:13,909 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:13,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845033908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:13,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:13,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845033913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:13,919 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:13,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845033916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:13,934 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:13,935 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-10T15:36:13,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:13,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:13,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:13,936 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:13,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:13,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:13,974 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:13,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845033961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:14,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-10T15:36:14,095 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:14,096 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-10T15:36:14,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:14,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:14,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 
2024-12-10T15:36:14,096 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:14,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:14,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:14,134 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/07b53fd4094d48a7b365f3dde238a8bd 2024-12-10T15:36:14,145 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/25b31a0525ea4bc692380663230da6ad as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/25b31a0525ea4bc692380663230da6ad 2024-12-10T15:36:14,156 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/25b31a0525ea4bc692380663230da6ad, entries=200, sequenceid=290, filesize=14.4 K 2024-12-10T15:36:14,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/5769f6613ca14cf69ea2977854d43cc7 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/5769f6613ca14cf69ea2977854d43cc7 2024-12-10T15:36:14,185 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/5769f6613ca14cf69ea2977854d43cc7, entries=150, 
sequenceid=290, filesize=12.0 K 2024-12-10T15:36:14,192 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/07b53fd4094d48a7b365f3dde238a8bd as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/07b53fd4094d48a7b365f3dde238a8bd 2024-12-10T15:36:14,200 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/07b53fd4094d48a7b365f3dde238a8bd, entries=150, sequenceid=290, filesize=12.0 K 2024-12-10T15:36:14,201 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=33.54 KB/34350 for 8a8b39bcfc5042b2f61256808771f62a in 1512ms, sequenceid=290, compaction requested=true 2024-12-10T15:36:14,201 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:14,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8a8b39bcfc5042b2f61256808771f62a:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:36:14,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:14,202 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:14,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8a8b39bcfc5042b2f61256808771f62a:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:36:14,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:14,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8a8b39bcfc5042b2f61256808771f62a:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:36:14,202 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:14,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:14,203 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37195 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:14,204 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 8a8b39bcfc5042b2f61256808771f62a/A is initiating minor compaction (all files) 2024-12-10T15:36:14,204 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8a8b39bcfc5042b2f61256808771f62a/A in 
TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:14,204 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/55c0f9122e4143658bf3aba0351df8f3, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/6df7be84f5be49ee924a1e0ec89ec64b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/25b31a0525ea4bc692380663230da6ad] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp, totalSize=36.3 K 2024-12-10T15:36:14,204 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:14,204 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 8a8b39bcfc5042b2f61256808771f62a/B is initiating minor compaction (all files) 2024-12-10T15:36:14,204 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8a8b39bcfc5042b2f61256808771f62a/B in TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:14,204 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/435039e07c0a43999fc38a461dc30736, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/fd75d6a258ee4bcb905d9781bea9fe61, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/5769f6613ca14cf69ea2977854d43cc7] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp, totalSize=33.9 K 2024-12-10T15:36:14,205 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 435039e07c0a43999fc38a461dc30736, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733844970576 2024-12-10T15:36:14,205 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 55c0f9122e4143658bf3aba0351df8f3, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733844970576 2024-12-10T15:36:14,206 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting fd75d6a258ee4bcb905d9781bea9fe61, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=257, earliestPutTs=1733844971267 2024-12-10T15:36:14,206 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6df7be84f5be49ee924a1e0ec89ec64b, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=257, earliestPutTs=1733844971267 2024-12-10T15:36:14,206 
DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 5769f6613ca14cf69ea2977854d43cc7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733844972031 2024-12-10T15:36:14,208 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 25b31a0525ea4bc692380663230da6ad, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733844972006 2024-12-10T15:36:14,231 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8a8b39bcfc5042b2f61256808771f62a#A#compaction#57 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:14,232 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/67d6c15acdad4f17992e99de7e7c9558 is 50, key is test_row_0/A:col10/1733844972031/Put/seqid=0 2024-12-10T15:36:14,233 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8a8b39bcfc5042b2f61256808771f62a#B#compaction#58 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:14,233 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/d0079a37a9914bfba7062d884c0d1f0c is 50, key is test_row_0/B:col10/1733844972031/Put/seqid=0 2024-12-10T15:36:14,261 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:14,265 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-10T15:36:14,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 
2024-12-10T15:36:14,265 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 8a8b39bcfc5042b2f61256808771f62a 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-10T15:36:14,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=A 2024-12-10T15:36:14,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:14,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=B 2024-12-10T15:36:14,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:14,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=C 2024-12-10T15:36:14,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:14,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741897_1073 (size=12949) 2024-12-10T15:36:14,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/5d3a632119074d94a554e773ccfc8f7c is 50, key is test_row_0/A:col10/1733844972693/Put/seqid=0 2024-12-10T15:36:14,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741896_1072 (size=12949) 2024-12-10T15:36:14,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741898_1074 (size=12301) 2024-12-10T15:36:14,376 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/d0079a37a9914bfba7062d884c0d1f0c as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/d0079a37a9914bfba7062d884c0d1f0c 2024-12-10T15:36:14,385 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8a8b39bcfc5042b2f61256808771f62a/B of 8a8b39bcfc5042b2f61256808771f62a into d0079a37a9914bfba7062d884c0d1f0c(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:36:14,385 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:14,385 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a., storeName=8a8b39bcfc5042b2f61256808771f62a/B, priority=13, startTime=1733844974202; duration=0sec 2024-12-10T15:36:14,386 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:14,386 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8a8b39bcfc5042b2f61256808771f62a:B 2024-12-10T15:36:14,386 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:14,388 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:14,388 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 8a8b39bcfc5042b2f61256808771f62a/C is initiating minor compaction (all files) 2024-12-10T15:36:14,388 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8a8b39bcfc5042b2f61256808771f62a/C in TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:14,388 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/bcd4abac354548c4be685e635550f2ab, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/1a300140b9794e87a91a0ea289ef81d8, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/07b53fd4094d48a7b365f3dde238a8bd] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp, totalSize=33.9 K 2024-12-10T15:36:14,389 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting bcd4abac354548c4be685e635550f2ab, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733844970576 2024-12-10T15:36:14,389 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 1a300140b9794e87a91a0ea289ef81d8, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=257, earliestPutTs=1733844971267 2024-12-10T15:36:14,389 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 07b53fd4094d48a7b365f3dde238a8bd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733844972031 2024-12-10T15:36:14,417 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
8a8b39bcfc5042b2f61256808771f62a#C#compaction#60 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:14,417 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/6b4ceff5c14f44de97ca796edb9c1c60 is 50, key is test_row_0/C:col10/1733844972031/Put/seqid=0 2024-12-10T15:36:14,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741899_1075 (size=12949) 2024-12-10T15:36:14,696 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/67d6c15acdad4f17992e99de7e7c9558 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/67d6c15acdad4f17992e99de7e7c9558 2024-12-10T15:36:14,719 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8a8b39bcfc5042b2f61256808771f62a/A of 8a8b39bcfc5042b2f61256808771f62a into 67d6c15acdad4f17992e99de7e7c9558(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:36:14,719 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:14,719 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a., storeName=8a8b39bcfc5042b2f61256808771f62a/A, priority=13, startTime=1733844974202; duration=0sec 2024-12-10T15:36:14,720 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:14,720 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8a8b39bcfc5042b2f61256808771f62a:A 2024-12-10T15:36:14,771 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=298 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/5d3a632119074d94a554e773ccfc8f7c 2024-12-10T15:36:14,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/0d0d0c2fcdaa454a8f0ba04e689d93d6 is 50, key is test_row_0/B:col10/1733844972693/Put/seqid=0 2024-12-10T15:36:14,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741900_1076 (size=12301) 
2024-12-10T15:36:14,882 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=298 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/0d0d0c2fcdaa454a8f0ba04e689d93d6 2024-12-10T15:36:14,903 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/6b4ceff5c14f44de97ca796edb9c1c60 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/6b4ceff5c14f44de97ca796edb9c1c60 2024-12-10T15:36:14,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/5f4de534ceaf47c192ed83812985c626 is 50, key is test_row_0/C:col10/1733844972693/Put/seqid=0 2024-12-10T15:36:14,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:36:14,933 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:14,934 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8a8b39bcfc5042b2f61256808771f62a/C of 8a8b39bcfc5042b2f61256808771f62a into 6b4ceff5c14f44de97ca796edb9c1c60(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:36:14,934 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:14,934 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a., storeName=8a8b39bcfc5042b2f61256808771f62a/C, priority=13, startTime=1733844974202; duration=0sec 2024-12-10T15:36:14,934 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:14,934 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8a8b39bcfc5042b2f61256808771f62a:C 2024-12-10T15:36:14,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741901_1077 (size=12301) 2024-12-10T15:36:14,960 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=298 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/5f4de534ceaf47c192ed83812985c626 2024-12-10T15:36:14,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/5d3a632119074d94a554e773ccfc8f7c as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/5d3a632119074d94a554e773ccfc8f7c 2024-12-10T15:36:15,013 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:15,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845035008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:15,019 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/5d3a632119074d94a554e773ccfc8f7c, entries=150, sequenceid=298, filesize=12.0 K 2024-12-10T15:36:15,024 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:15,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845035010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:15,024 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:15,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845035012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:15,025 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:15,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845035012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:15,027 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:15,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845035016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:15,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/0d0d0c2fcdaa454a8f0ba04e689d93d6 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/0d0d0c2fcdaa454a8f0ba04e689d93d6 2024-12-10T15:36:15,040 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/0d0d0c2fcdaa454a8f0ba04e689d93d6, entries=150, sequenceid=298, filesize=12.0 K 2024-12-10T15:36:15,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/5f4de534ceaf47c192ed83812985c626 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/5f4de534ceaf47c192ed83812985c626 2024-12-10T15:36:15,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-10T15:36:15,050 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/5f4de534ceaf47c192ed83812985c626, entries=150, sequenceid=298, filesize=12.0 K 2024-12-10T15:36:15,051 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=174.43 KB/178620 for 8a8b39bcfc5042b2f61256808771f62a in 786ms, sequenceid=298, compaction requested=false 2024-12-10T15:36:15,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 8a8b39bcfc5042b2f61256808771f62a: 
2024-12-10T15:36:15,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:15,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-10T15:36:15,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-10T15:36:15,070 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-12-10T15:36:15,070 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1370 sec 2024-12-10T15:36:15,073 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 2.1470 sec 2024-12-10T15:36:15,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:36:15,123 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8a8b39bcfc5042b2f61256808771f62a 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-12-10T15:36:15,124 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=A 2024-12-10T15:36:15,124 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:15,124 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=B 2024-12-10T15:36:15,124 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:15,124 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=C 2024-12-10T15:36:15,124 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:15,133 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/fe93889a54a34b28b9055d4f96ffd28b is 50, key is test_row_0/A:col10/1733844975010/Put/seqid=0 2024-12-10T15:36:15,147 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:15,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845035135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:15,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:15,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845035139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:15,149 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:15,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845035139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:15,149 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:15,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845035146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:15,151 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:15,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845035147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:15,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741902_1078 (size=14741) 2024-12-10T15:36:15,187 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/fe93889a54a34b28b9055d4f96ffd28b 2024-12-10T15:36:15,245 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/2240596fdc5b436692f9aac8e529831b is 50, key is test_row_0/B:col10/1733844975010/Put/seqid=0 2024-12-10T15:36:15,251 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:15,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845035251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:15,254 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:15,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845035253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:15,269 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:15,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845035263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:15,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741903_1079 (size=12301) 2024-12-10T15:36:15,300 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/2240596fdc5b436692f9aac8e529831b 2024-12-10T15:36:15,359 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:15,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845035353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:15,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:15,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845035375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:15,400 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/4ab9750083cf4c7b91e338e3110ac86d is 50, key is test_row_0/C:col10/1733844975010/Put/seqid=0 2024-12-10T15:36:15,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741904_1080 (size=12301) 2024-12-10T15:36:15,455 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:15,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845035455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:15,458 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:15,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845035456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:15,480 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:15,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845035479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:15,672 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:15,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845035671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:15,689 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:15,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845035687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:15,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:15,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845035760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:15,771 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:15,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845035766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:15,792 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:15,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845035785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:15,853 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/4ab9750083cf4c7b91e338e3110ac86d 2024-12-10T15:36:15,862 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/fe93889a54a34b28b9055d4f96ffd28b as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/fe93889a54a34b28b9055d4f96ffd28b 2024-12-10T15:36:15,873 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/fe93889a54a34b28b9055d4f96ffd28b, entries=200, sequenceid=331, filesize=14.4 K 2024-12-10T15:36:15,875 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/2240596fdc5b436692f9aac8e529831b as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/2240596fdc5b436692f9aac8e529831b 2024-12-10T15:36:15,886 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/2240596fdc5b436692f9aac8e529831b, entries=150, sequenceid=331, filesize=12.0 K 2024-12-10T15:36:15,887 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/4ab9750083cf4c7b91e338e3110ac86d as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/4ab9750083cf4c7b91e338e3110ac86d 2024-12-10T15:36:15,895 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/4ab9750083cf4c7b91e338e3110ac86d, entries=150, sequenceid=331, filesize=12.0 K 2024-12-10T15:36:15,902 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for 8a8b39bcfc5042b2f61256808771f62a in 779ms, sequenceid=331, compaction requested=true 2024-12-10T15:36:15,902 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:15,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8a8b39bcfc5042b2f61256808771f62a:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:36:15,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:15,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8a8b39bcfc5042b2f61256808771f62a:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:36:15,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-10T15:36:15,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8a8b39bcfc5042b2f61256808771f62a:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:36:15,903 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-10T15:36:15,903 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:15,903 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:15,905 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:15,905 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 8a8b39bcfc5042b2f61256808771f62a/C is initiating minor compaction (all files) 2024-12-10T15:36:15,905 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8a8b39bcfc5042b2f61256808771f62a/C in TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 
2024-12-10T15:36:15,905 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/6b4ceff5c14f44de97ca796edb9c1c60, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/5f4de534ceaf47c192ed83812985c626, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/4ab9750083cf4c7b91e338e3110ac86d] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp, totalSize=36.7 K 2024-12-10T15:36:15,905 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39991 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:15,905 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 8a8b39bcfc5042b2f61256808771f62a/A is initiating minor compaction (all files) 2024-12-10T15:36:15,905 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8a8b39bcfc5042b2f61256808771f62a/A in TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:15,906 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/67d6c15acdad4f17992e99de7e7c9558, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/5d3a632119074d94a554e773ccfc8f7c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/fe93889a54a34b28b9055d4f96ffd28b] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp, totalSize=39.1 K 2024-12-10T15:36:15,906 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 6b4ceff5c14f44de97ca796edb9c1c60, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733844972031 2024-12-10T15:36:15,906 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 67d6c15acdad4f17992e99de7e7c9558, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733844972031 2024-12-10T15:36:15,906 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f4de534ceaf47c192ed83812985c626, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1733844972693 2024-12-10T15:36:15,907 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d3a632119074d94a554e773ccfc8f7c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1733844972693 2024-12-10T15:36:15,907 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 4ab9750083cf4c7b91e338e3110ac86d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733844975010 2024-12-10T15:36:15,910 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting fe93889a54a34b28b9055d4f96ffd28b, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733844975008 2024-12-10T15:36:15,925 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8a8b39bcfc5042b2f61256808771f62a#A#compaction#66 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:15,926 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/b310e5f79c914b30a5410e110d049eec is 50, key is test_row_0/A:col10/1733844975010/Put/seqid=0 2024-12-10T15:36:15,938 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8a8b39bcfc5042b2f61256808771f62a#C#compaction#67 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:15,939 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/cca559971a31474da0d233e129dfc3a1 is 50, key is test_row_0/C:col10/1733844975010/Put/seqid=0 2024-12-10T15:36:16,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741905_1081 (size=13051) 2024-12-10T15:36:16,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741906_1082 (size=13051) 2024-12-10T15:36:16,216 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8a8b39bcfc5042b2f61256808771f62a 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T15:36:16,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:36:16,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=A 2024-12-10T15:36:16,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:16,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=B 2024-12-10T15:36:16,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:16,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=C 2024-12-10T15:36:16,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:16,249 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/bd08306b93184e6ab9e9d8aab1691bbf is 50, key is test_row_0/A:col10/1733844976196/Put/seqid=0 2024-12-10T15:36:16,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741907_1083 (size=12301) 2024-12-10T15:36:16,304 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/bd08306b93184e6ab9e9d8aab1691bbf 2024-12-10T15:36:16,329 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/5a579849a02c4e98a703d13f240b046d is 50, key is test_row_0/B:col10/1733844976196/Put/seqid=0 2024-12-10T15:36:16,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:16,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845036319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:16,341 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:16,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:16,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845036326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:16,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845036328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:16,342 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:16,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845036332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:16,345 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:16,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845036333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:16,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741908_1084 (size=12301) 2024-12-10T15:36:16,391 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/5a579849a02c4e98a703d13f240b046d 2024-12-10T15:36:16,407 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/28911189c55e4594b7032240582de6ad is 50, key is test_row_0/C:col10/1733844976196/Put/seqid=0 2024-12-10T15:36:16,427 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/b310e5f79c914b30a5410e110d049eec as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/b310e5f79c914b30a5410e110d049eec 2024-12-10T15:36:16,438 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8a8b39bcfc5042b2f61256808771f62a/A of 8a8b39bcfc5042b2f61256808771f62a into b310e5f79c914b30a5410e110d049eec(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:36:16,438 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8a8b39bcfc5042b2f61256808771f62a:
2024-12-10T15:36:16,438 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a., storeName=8a8b39bcfc5042b2f61256808771f62a/A, priority=13, startTime=1733844975902; duration=0sec
2024-12-10T15:36:16,439 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-10T15:36:16,439 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8a8b39bcfc5042b2f61256808771f62a:A
2024-12-10T15:36:16,439 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-10T15:36:16,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741909_1085 (size=12301)
2024-12-10T15:36:16,446 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-10T15:36:16,447 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 8a8b39bcfc5042b2f61256808771f62a/B is initiating minor compaction (all files)
2024-12-10T15:36:16,447 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8a8b39bcfc5042b2f61256808771f62a/B in TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.
2024-12-10T15:36:16,447 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:16,447 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/d0079a37a9914bfba7062d884c0d1f0c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/0d0d0c2fcdaa454a8f0ba04e689d93d6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/2240596fdc5b436692f9aac8e529831b] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp, totalSize=36.7 K 2024-12-10T15:36:16,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845036443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:16,449 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting d0079a37a9914bfba7062d884c0d1f0c, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733844972031 2024-12-10T15:36:16,453 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0d0d0c2fcdaa454a8f0ba04e689d93d6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1733844972693 2024-12-10T15:36:16,450 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:16,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845036445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:16,455 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2240596fdc5b436692f9aac8e529831b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733844975010 2024-12-10T15:36:16,457 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:16,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845036446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:16,458 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:16,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845036448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:16,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:16,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845036453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:16,465 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/cca559971a31474da0d233e129dfc3a1 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/cca559971a31474da0d233e129dfc3a1 2024-12-10T15:36:16,479 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8a8b39bcfc5042b2f61256808771f62a#B#compaction#71 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:16,480 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/1e9f8f87075e4ba98558432f6be91975 is 50, key is test_row_0/B:col10/1733844975010/Put/seqid=0 2024-12-10T15:36:16,481 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8a8b39bcfc5042b2f61256808771f62a/C of 8a8b39bcfc5042b2f61256808771f62a into cca559971a31474da0d233e129dfc3a1(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:36:16,481 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8a8b39bcfc5042b2f61256808771f62a:
2024-12-10T15:36:16,481 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a., storeName=8a8b39bcfc5042b2f61256808771f62a/C, priority=13, startTime=1733844975903; duration=0sec
2024-12-10T15:36:16,481 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-10T15:36:16,481 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8a8b39bcfc5042b2f61256808771f62a:C
2024-12-10T15:36:16,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741910_1086 (size=13051)
2024-12-10T15:36:16,536 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/1e9f8f87075e4ba98558432f6be91975 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/1e9f8f87075e4ba98558432f6be91975
2024-12-10T15:36:16,546 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8a8b39bcfc5042b2f61256808771f62a/B of 8a8b39bcfc5042b2f61256808771f62a into 1e9f8f87075e4ba98558432f6be91975(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-10T15:36:16,546 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8a8b39bcfc5042b2f61256808771f62a:
2024-12-10T15:36:16,546 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a., storeName=8a8b39bcfc5042b2f61256808771f62a/B, priority=13, startTime=1733844975903; duration=0sec
2024-12-10T15:36:16,549 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-10T15:36:16,549 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8a8b39bcfc5042b2f61256808771f62a:B
2024-12-10T15:36:16,655 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:16,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845036654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:16,661 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:16,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845036660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:16,663 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:16,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845036660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:16,666 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:16,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845036662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:16,676 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:16,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845036665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:16,851 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/28911189c55e4594b7032240582de6ad 2024-12-10T15:36:16,877 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/bd08306b93184e6ab9e9d8aab1691bbf as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/bd08306b93184e6ab9e9d8aab1691bbf 2024-12-10T15:36:16,894 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/bd08306b93184e6ab9e9d8aab1691bbf, entries=150, sequenceid=342, filesize=12.0 K 2024-12-10T15:36:16,896 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/5a579849a02c4e98a703d13f240b046d as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/5a579849a02c4e98a703d13f240b046d 2024-12-10T15:36:16,910 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/5a579849a02c4e98a703d13f240b046d, entries=150, sequenceid=342, filesize=12.0 K 2024-12-10T15:36:16,912 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/28911189c55e4594b7032240582de6ad as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/28911189c55e4594b7032240582de6ad 2024-12-10T15:36:16,925 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/28911189c55e4594b7032240582de6ad, entries=150, sequenceid=342, filesize=12.0 K
2024-12-10T15:36:16,927 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 8a8b39bcfc5042b2f61256808771f62a in 711ms, sequenceid=342, compaction requested=false
2024-12-10T15:36:16,927 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8a8b39bcfc5042b2f61256808771f62a:
2024-12-10T15:36:16,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 8a8b39bcfc5042b2f61256808771f62a
2024-12-10T15:36:16,968 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8a8b39bcfc5042b2f61256808771f62a 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB
2024-12-10T15:36:16,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=A
2024-12-10T15:36:16,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T15:36:16,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=B
2024-12-10T15:36:16,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T15:36:16,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=C
2024-12-10T15:36:16,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T15:36:16,977 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/e4700be553614010b257434067afd0b9 is 50, key is test_row_0/A:col10/1733844976320/Put/seqid=0
2024-12-10T15:36:17,012 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:17,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845036987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:17,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:17,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845036996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:17,017 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:17,017 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:17,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845037012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:17,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845037012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:17,017 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:17,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845037016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:17,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741911_1087 (size=14741) 2024-12-10T15:36:17,035 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/e4700be553614010b257434067afd0b9 2024-12-10T15:36:17,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-10T15:36:17,049 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-12-10T15:36:17,056 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:36:17,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-12-10T15:36:17,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-10T15:36:17,068 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/b1b9fad221b0400da619cce42383b9ea is 50, key is test_row_0/B:col10/1733844976320/Put/seqid=0 
2024-12-10T15:36:17,069 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-10T15:36:17,076 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-10T15:36:17,076 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-10T15:36:17,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741912_1088 (size=12301)
2024-12-10T15:36:17,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T15:36:17,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845037118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049
2024-12-10T15:36:17,122 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:17,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845037118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:17,123 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:17,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845037119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:17,124 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:17,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845037119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:17,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-10T15:36:17,230 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:17,231 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-10T15:36:17,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:17,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:17,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:17,232 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:17,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:17,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:17,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:17,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845037324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:17,334 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:17,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845037333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:17,335 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:17,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845037333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:17,337 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:17,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845037336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:17,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-10T15:36:17,390 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:17,393 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-10T15:36:17,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:17,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:17,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:17,394 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
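The pid=23 failures above are the master's remote flush procedure being rejected while the region is still mid-flush ("NOT flushing ... as already flushing"); the master keeps re-dispatching the FlushRegionCallable until the in-flight flush finishes. A minimal, hedged sketch of how such a table flush is requested through the standard HBase client Admin API (the table name reuses the test's TestAcidGuarantees table; the configuration and everything else here are assumptions for illustration, not the test's actual driver code):

// Hedged sketch: requesting a flush from the client side.
// The master turns this into a flush procedure dispatched to the region server;
// if the region is already flushing, the callable fails with
// "Unable to complete flush ..." and is retried, as seen in the log above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}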
2024-12-10T15:36:17,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:17,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:17,502 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/b1b9fad221b0400da619cce42383b9ea 2024-12-10T15:36:17,520 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:17,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845037518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:17,521 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/7b0344fc18164c1ab5639ec92b599f12 is 50, key is test_row_0/C:col10/1733844976320/Put/seqid=0 2024-12-10T15:36:17,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741913_1089 (size=12301) 2024-12-10T15:36:17,559 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:17,560 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-10T15:36:17,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting 
region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:17,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:17,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:17,560 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:17,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:17,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
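The recurring RegionTooBusyException entries are HRegion.checkResources rejecting writes while the region's memstore is over its blocking limit (512.0 K here, deliberately small for this test). A hedged sketch of the writer side using only the standard HBase client API; the explicit retry loop is purely illustrative, since the real client normally retries retryable exceptions like this one internally. Row, family, and qualifier names mirror the test rows visible in the log; the value and retry parameters are assumptions:

// Hedged sketch: a put against a region whose memstore is over its blocking
// limit is rejected server-side (HRegion.checkResources) and must be retried.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);   // may be rejected while the memstore is over its limit
          break;
        } catch (RegionTooBusyException e) {
          // Back off and retry while the flush catches up (illustrative only).
          Thread.sleep(100L * (attempt + 1));
        }
      }
    }
  }
}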
2024-12-10T15:36:17,564 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/7b0344fc18164c1ab5639ec92b599f12 2024-12-10T15:36:17,573 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/e4700be553614010b257434067afd0b9 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/e4700be553614010b257434067afd0b9 2024-12-10T15:36:17,579 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/e4700be553614010b257434067afd0b9, entries=200, sequenceid=371, filesize=14.4 K 2024-12-10T15:36:17,583 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/b1b9fad221b0400da619cce42383b9ea as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/b1b9fad221b0400da619cce42383b9ea 2024-12-10T15:36:17,590 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/b1b9fad221b0400da619cce42383b9ea, entries=150, sequenceid=371, filesize=12.0 K 2024-12-10T15:36:17,594 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/7b0344fc18164c1ab5639ec92b599f12 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/7b0344fc18164c1ab5639ec92b599f12 2024-12-10T15:36:17,607 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/7b0344fc18164c1ab5639ec92b599f12, entries=150, sequenceid=371, filesize=12.0 K 2024-12-10T15:36:17,608 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 8a8b39bcfc5042b2f61256808771f62a in 641ms, sequenceid=371, compaction requested=true 2024-12-10T15:36:17,608 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:17,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8a8b39bcfc5042b2f61256808771f62a:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:36:17,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:17,609 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:17,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8a8b39bcfc5042b2f61256808771f62a:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:36:17,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:17,609 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:17,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8a8b39bcfc5042b2f61256808771f62a:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:36:17,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:17,610 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40093 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:17,610 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 8a8b39bcfc5042b2f61256808771f62a/A is initiating minor compaction (all files) 2024-12-10T15:36:17,610 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8a8b39bcfc5042b2f61256808771f62a/A in TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:17,611 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/b310e5f79c914b30a5410e110d049eec, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/bd08306b93184e6ab9e9d8aab1691bbf, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/e4700be553614010b257434067afd0b9] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp, totalSize=39.2 K 2024-12-10T15:36:17,611 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:17,611 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 8a8b39bcfc5042b2f61256808771f62a/B is initiating minor compaction (all files) 2024-12-10T15:36:17,612 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8a8b39bcfc5042b2f61256808771f62a/B in TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 
2024-12-10T15:36:17,612 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/1e9f8f87075e4ba98558432f6be91975, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/5a579849a02c4e98a703d13f240b046d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/b1b9fad221b0400da619cce42383b9ea] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp, totalSize=36.8 K 2024-12-10T15:36:17,612 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting b310e5f79c914b30a5410e110d049eec, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733844975010 2024-12-10T15:36:17,612 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e9f8f87075e4ba98558432f6be91975, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733844975010 2024-12-10T15:36:17,613 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting bd08306b93184e6ab9e9d8aab1691bbf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1733844975131 2024-12-10T15:36:17,613 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 5a579849a02c4e98a703d13f240b046d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1733844975131 2024-12-10T15:36:17,614 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting e4700be553614010b257434067afd0b9, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1733844976312 2024-12-10T15:36:17,614 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting b1b9fad221b0400da619cce42383b9ea, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1733844976312 2024-12-10T15:36:17,638 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8a8b39bcfc5042b2f61256808771f62a#B#compaction#75 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:17,638 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8a8b39bcfc5042b2f61256808771f62a#A#compaction#76 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:17,639 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/f62b72b2f8cb44e9b006af231dcda4c8 is 50, key is test_row_0/B:col10/1733844976320/Put/seqid=0 2024-12-10T15:36:17,639 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/3c2e6a21ff4240a9872cbff8eed1b24f is 50, key is test_row_0/A:col10/1733844976320/Put/seqid=0 2024-12-10T15:36:17,644 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8a8b39bcfc5042b2f61256808771f62a 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-10T15:36:17,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=A 2024-12-10T15:36:17,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:17,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=B 2024-12-10T15:36:17,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:17,645 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=C 2024-12-10T15:36:17,646 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:17,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:36:17,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-10T15:36:17,672 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/81120c39f0ef4a34a68617e354d89976 is 50, key is test_row_0/A:col10/1733844977629/Put/seqid=0 2024-12-10T15:36:17,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741914_1090 (size=13153) 2024-12-10T15:36:17,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741915_1091 (size=13153) 2024-12-10T15:36:17,708 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/f62b72b2f8cb44e9b006af231dcda4c8 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/f62b72b2f8cb44e9b006af231dcda4c8 2024-12-10T15:36:17,713 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/3c2e6a21ff4240a9872cbff8eed1b24f as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/3c2e6a21ff4240a9872cbff8eed1b24f 2024-12-10T15:36:17,715 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:17,717 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-10T15:36:17,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:17,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:17,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:17,717 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:17,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:17,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741916_1092 (size=12301) 2024-12-10T15:36:17,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:17,727 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/81120c39f0ef4a34a68617e354d89976 2024-12-10T15:36:17,737 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8a8b39bcfc5042b2f61256808771f62a/B of 8a8b39bcfc5042b2f61256808771f62a into f62b72b2f8cb44e9b006af231dcda4c8(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:36:17,737 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:17,737 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a., storeName=8a8b39bcfc5042b2f61256808771f62a/B, priority=13, startTime=1733844977608; duration=0sec 2024-12-10T15:36:17,737 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:17,737 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8a8b39bcfc5042b2f61256808771f62a:B 2024-12-10T15:36:17,738 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:17,742 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8a8b39bcfc5042b2f61256808771f62a/A of 8a8b39bcfc5042b2f61256808771f62a into 3c2e6a21ff4240a9872cbff8eed1b24f(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
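The "Exploring compaction algorithm has selected 3 files of size 40093 ... with 1 in ratio" entries reflect a ratio test over the candidate store files. Below is a much-simplified, hypothetical sketch of that check, not the actual ExploringCompactionPolicy code; the file sizes approximate the three A-store files from the log (12.7 K, 12.0 K, 14.4 K) and 1.2 is the usual default compaction ratio:

// Hedged, simplified illustration of a ratio check over compaction candidates.
import java.util.List;

public class RatioCheckSketch {
  /** True if every file is at most `ratio` times the combined size of the others. */
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Approximate sizes of the three A-store candidates; they sum to 40093 as in the log.
    System.out.println(filesInRatio(List.of(13005L, 12301L, 14787L), 1.2)); // prints true
  }
}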
2024-12-10T15:36:17,742 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:17,742 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a., storeName=8a8b39bcfc5042b2f61256808771f62a/A, priority=13, startTime=1733844977608; duration=0sec 2024-12-10T15:36:17,742 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:17,742 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8a8b39bcfc5042b2f61256808771f62a:A 2024-12-10T15:36:17,744 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:17,744 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 8a8b39bcfc5042b2f61256808771f62a/C is initiating minor compaction (all files) 2024-12-10T15:36:17,744 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8a8b39bcfc5042b2f61256808771f62a/C in TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:17,745 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/cca559971a31474da0d233e129dfc3a1, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/28911189c55e4594b7032240582de6ad, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/7b0344fc18164c1ab5639ec92b599f12] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp, totalSize=36.8 K 2024-12-10T15:36:17,746 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting cca559971a31474da0d233e129dfc3a1, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1733844975010 2024-12-10T15:36:17,747 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 28911189c55e4594b7032240582de6ad, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1733844975131 2024-12-10T15:36:17,749 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/49231475c651472caf09af9e3057be57 is 50, key is test_row_0/B:col10/1733844977629/Put/seqid=0 2024-12-10T15:36:17,752 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b0344fc18164c1ab5639ec92b599f12, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, 
seqNum=371, earliestPutTs=1733844976312 2024-12-10T15:36:17,772 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:17,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845037760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:17,787 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:17,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845037773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:17,788 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8a8b39bcfc5042b2f61256808771f62a#C#compaction#79 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:17,789 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/c0134fb20b8d4415a25c8edd4ac47d96 is 50, key is test_row_0/C:col10/1733844976320/Put/seqid=0 2024-12-10T15:36:17,797 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:17,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845037789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:17,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:17,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845037789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:17,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741917_1093 (size=12301) 2024-12-10T15:36:17,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741918_1094 (size=13153) 2024-12-10T15:36:17,876 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:17,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845037874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:17,885 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:17,886 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-10T15:36:17,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:17,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:17,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:17,886 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:17,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:17,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:17,899 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:17,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845037894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:17,902 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:17,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845037902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:17,903 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:17,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845037902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:18,058 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:18,059 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-10T15:36:18,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:18,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:18,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:18,060 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:18,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:18,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:18,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:18,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845038079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:18,104 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:18,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845038103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:18,118 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:18,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845038115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:18,119 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:18,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845038116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:18,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-10T15:36:18,223 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:18,224 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-10T15:36:18,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:18,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:18,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:18,224 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:18,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:18,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:18,236 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/49231475c651472caf09af9e3057be57 2024-12-10T15:36:18,300 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/c0134fb20b8d4415a25c8edd4ac47d96 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/c0134fb20b8d4415a25c8edd4ac47d96 2024-12-10T15:36:18,305 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/5d2e052a59c14d24b5ab4aa2f9a81fa0 is 50, key is test_row_0/C:col10/1733844977629/Put/seqid=0 2024-12-10T15:36:18,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741919_1095 (size=12301) 2024-12-10T15:36:18,336 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/5d2e052a59c14d24b5ab4aa2f9a81fa0 2024-12-10T15:36:18,346 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8a8b39bcfc5042b2f61256808771f62a/C of 8a8b39bcfc5042b2f61256808771f62a into c0134fb20b8d4415a25c8edd4ac47d96(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:36:18,352 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:18,352 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a., storeName=8a8b39bcfc5042b2f61256808771f62a/C, priority=13, startTime=1733844977609; duration=0sec 2024-12-10T15:36:18,353 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:18,353 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8a8b39bcfc5042b2f61256808771f62a:C 2024-12-10T15:36:18,376 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/81120c39f0ef4a34a68617e354d89976 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/81120c39f0ef4a34a68617e354d89976 2024-12-10T15:36:18,381 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:18,383 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-10T15:36:18,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:18,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. as already flushing 2024-12-10T15:36:18,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:18,384 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:18,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:18,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:18,394 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:18,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845038392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:18,401 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/81120c39f0ef4a34a68617e354d89976, entries=150, sequenceid=383, filesize=12.0 K 2024-12-10T15:36:18,412 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:18,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845038411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:18,414 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/49231475c651472caf09af9e3057be57 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/49231475c651472caf09af9e3057be57 2024-12-10T15:36:18,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:18,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845038424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:18,428 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:18,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845038424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:18,460 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/49231475c651472caf09af9e3057be57, entries=150, sequenceid=383, filesize=12.0 K 2024-12-10T15:36:18,483 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/5d2e052a59c14d24b5ab4aa2f9a81fa0 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/5d2e052a59c14d24b5ab4aa2f9a81fa0 2024-12-10T15:36:18,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:18,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36368 deadline: 1733845038529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:18,544 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/5d2e052a59c14d24b5ab4aa2f9a81fa0, entries=150, sequenceid=383, filesize=12.0 K 2024-12-10T15:36:18,545 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=154.31 KB/158010 for 8a8b39bcfc5042b2f61256808771f62a in 900ms, sequenceid=383, compaction requested=false 2024-12-10T15:36:18,545 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:18,547 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:18,551 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-10T15:36:18,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 
2024-12-10T15:36:18,552 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 8a8b39bcfc5042b2f61256808771f62a 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-10T15:36:18,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=A 2024-12-10T15:36:18,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:18,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=B 2024-12-10T15:36:18,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:18,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=C 2024-12-10T15:36:18,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:18,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/2d2751e316b244e982689d8fcbc07498 is 50, key is test_row_0/A:col10/1733844977770/Put/seqid=0 2024-12-10T15:36:18,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741920_1096 (size=12301) 2024-12-10T15:36:18,647 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=412 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/2d2751e316b244e982689d8fcbc07498 2024-12-10T15:36:18,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/2c90dc4ef7774705a125df1cec5c8c12 is 50, key is test_row_0/B:col10/1733844977770/Put/seqid=0 2024-12-10T15:36:18,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741921_1097 (size=12301) 2024-12-10T15:36:18,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:36:18,924 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 
as already flushing 2024-12-10T15:36:18,945 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:18,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845038943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:18,946 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:18,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845038940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:18,946 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:18,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845038944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:18,946 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:18,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845038944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:19,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:19,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845039047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:19,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:19,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845039049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:19,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:19,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845039049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:19,120 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=412 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/2c90dc4ef7774705a125df1cec5c8c12 2024-12-10T15:36:19,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/77edd795976648a080ee5d7af3d7a7cd is 50, key is test_row_0/C:col10/1733844977770/Put/seqid=0 2024-12-10T15:36:19,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741922_1098 (size=12301) 2024-12-10T15:36:19,170 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=412 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/77edd795976648a080ee5d7af3d7a7cd 2024-12-10T15:36:19,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-10T15:36:19,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/2d2751e316b244e982689d8fcbc07498 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/2d2751e316b244e982689d8fcbc07498 2024-12-10T15:36:19,190 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/2d2751e316b244e982689d8fcbc07498, entries=150, sequenceid=412, filesize=12.0 K 2024-12-10T15:36:19,205 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/2c90dc4ef7774705a125df1cec5c8c12 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/2c90dc4ef7774705a125df1cec5c8c12 2024-12-10T15:36:19,219 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/2c90dc4ef7774705a125df1cec5c8c12, entries=150, sequenceid=412, filesize=12.0 K 2024-12-10T15:36:19,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/77edd795976648a080ee5d7af3d7a7cd as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/77edd795976648a080ee5d7af3d7a7cd 2024-12-10T15:36:19,228 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/77edd795976648a080ee5d7af3d7a7cd, entries=150, sequenceid=412, filesize=12.0 K 2024-12-10T15:36:19,229 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 8a8b39bcfc5042b2f61256808771f62a in 677ms, sequenceid=412, compaction requested=true 2024-12-10T15:36:19,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:19,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 
2024-12-10T15:36:19,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-12-10T15:36:19,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-12-10T15:36:19,231 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-12-10T15:36:19,231 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1540 sec 2024-12-10T15:36:19,232 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 2.1750 sec 2024-12-10T15:36:19,253 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8a8b39bcfc5042b2f61256808771f62a 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T15:36:19,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=A 2024-12-10T15:36:19,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:19,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=B 2024-12-10T15:36:19,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:19,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=C 2024-12-10T15:36:19,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:19,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:36:19,261 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/04a4d8cdd35a44f4bd91368780581fd1 is 50, key is test_row_0/A:col10/1733844979251/Put/seqid=0 2024-12-10T15:36:19,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741923_1099 (size=14741) 2024-12-10T15:36:19,297 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=423 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/04a4d8cdd35a44f4bd91368780581fd1 2024-12-10T15:36:19,314 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/7a3c9b46fff24fb3abbb3d428b5821c5 is 50, key is test_row_0/B:col10/1733844979251/Put/seqid=0 2024-12-10T15:36:19,319 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:19,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845039312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:19,320 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:19,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845039314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:19,320 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:19,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845039315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:19,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741924_1100 (size=12301) 2024-12-10T15:36:19,421 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:19,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845039421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:19,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:19,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845039423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:19,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:19,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845039423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:19,628 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:19,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845039628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:19,628 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:19,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845039628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:19,628 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:19,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845039628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:19,758 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=423 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/7a3c9b46fff24fb3abbb3d428b5821c5 2024-12-10T15:36:19,789 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/08f255dc8e6540008e311f768884fd74 is 50, key is test_row_0/C:col10/1733844979251/Put/seqid=0 2024-12-10T15:36:19,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741925_1101 (size=12301) 2024-12-10T15:36:19,937 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:19,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36378 deadline: 1733845039935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:19,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:19,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36422 deadline: 1733845039945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:19,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:19,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36394 deadline: 1733845039948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:19,948 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:19,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36412 deadline: 1733845039948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:20,220 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=423 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/08f255dc8e6540008e311f768884fd74 2024-12-10T15:36:20,245 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-10T15:36:20,260 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/04a4d8cdd35a44f4bd91368780581fd1 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/04a4d8cdd35a44f4bd91368780581fd1 2024-12-10T15:36:20,284 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/04a4d8cdd35a44f4bd91368780581fd1, entries=200, sequenceid=423, filesize=14.4 K 2024-12-10T15:36:20,292 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/7a3c9b46fff24fb3abbb3d428b5821c5 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/7a3c9b46fff24fb3abbb3d428b5821c5 2024-12-10T15:36:20,350 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/7a3c9b46fff24fb3abbb3d428b5821c5, entries=150, sequenceid=423, filesize=12.0 K 2024-12-10T15:36:20,352 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/08f255dc8e6540008e311f768884fd74 as 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/08f255dc8e6540008e311f768884fd74 2024-12-10T15:36:20,393 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x767a8485 to 127.0.0.1:56346 2024-12-10T15:36:20,393 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:36:20,394 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c3b736e to 127.0.0.1:56346 2024-12-10T15:36:20,394 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4b5cad1a to 127.0.0.1:56346 2024-12-10T15:36:20,395 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:36:20,395 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:36:20,404 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6502d571 to 127.0.0.1:56346 2024-12-10T15:36:20,404 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:36:20,407 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/08f255dc8e6540008e311f768884fd74, entries=150, sequenceid=423, filesize=12.0 K 2024-12-10T15:36:20,408 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=161.02 KB/164880 for 8a8b39bcfc5042b2f61256808771f62a in 1155ms, sequenceid=423, compaction requested=true 2024-12-10T15:36:20,408 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:20,408 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8a8b39bcfc5042b2f61256808771f62a:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:36:20,408 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:20,408 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:36:20,408 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8a8b39bcfc5042b2f61256808771f62a:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:36:20,408 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:20,408 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8a8b39bcfc5042b2f61256808771f62a:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:36:20,408 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-10T15:36:20,409 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:36:20,412 DEBUG 
[RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52496 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:36:20,412 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 8a8b39bcfc5042b2f61256808771f62a/A is initiating minor compaction (all files) 2024-12-10T15:36:20,412 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8a8b39bcfc5042b2f61256808771f62a/A in TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:20,412 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/3c2e6a21ff4240a9872cbff8eed1b24f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/81120c39f0ef4a34a68617e354d89976, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/2d2751e316b244e982689d8fcbc07498, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/04a4d8cdd35a44f4bd91368780581fd1] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp, totalSize=51.3 K 2024-12-10T15:36:20,413 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 3c2e6a21ff4240a9872cbff8eed1b24f, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1733844976312 2024-12-10T15:36:20,413 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 81120c39f0ef4a34a68617e354d89976, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1733844976969 2024-12-10T15:36:20,416 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d2751e316b244e982689d8fcbc07498, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=412, earliestPutTs=1733844977765 2024-12-10T15:36:20,417 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 04a4d8cdd35a44f4bd91368780581fd1, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=423, earliestPutTs=1733844978934 2024-12-10T15:36:20,418 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50056 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:36:20,418 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 8a8b39bcfc5042b2f61256808771f62a/B is initiating minor compaction (all files) 2024-12-10T15:36:20,418 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8a8b39bcfc5042b2f61256808771f62a/B in TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 
2024-12-10T15:36:20,418 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/f62b72b2f8cb44e9b006af231dcda4c8, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/49231475c651472caf09af9e3057be57, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/2c90dc4ef7774705a125df1cec5c8c12, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/7a3c9b46fff24fb3abbb3d428b5821c5] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp, totalSize=48.9 K 2024-12-10T15:36:20,420 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting f62b72b2f8cb44e9b006af231dcda4c8, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1733844976312 2024-12-10T15:36:20,424 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 49231475c651472caf09af9e3057be57, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1733844976969 2024-12-10T15:36:20,425 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2c90dc4ef7774705a125df1cec5c8c12, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=412, earliestPutTs=1733844977765 2024-12-10T15:36:20,426 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7a3c9b46fff24fb3abbb3d428b5821c5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=423, earliestPutTs=1733844978937 2024-12-10T15:36:20,454 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8a8b39bcfc5042b2f61256808771f62a#A#compaction#87 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:20,455 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/5515e7b623cf49b097eb99ab834f01bd is 50, key is test_row_0/A:col10/1733844979251/Put/seqid=0 2024-12-10T15:36:20,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:36:20,461 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8a8b39bcfc5042b2f61256808771f62a 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-12-10T15:36:20,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=A 2024-12-10T15:36:20,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:20,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=B 2024-12-10T15:36:20,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:20,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=C 2024-12-10T15:36:20,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:20,463 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4ee2166f to 127.0.0.1:56346 2024-12-10T15:36:20,463 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:36:20,464 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3f34ff67 to 127.0.0.1:56346 2024-12-10T15:36:20,464 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:36:20,466 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6c63ae4e to 127.0.0.1:56346 2024-12-10T15:36:20,467 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:36:20,467 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8a8b39bcfc5042b2f61256808771f62a#B#compaction#88 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:20,468 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/0fea0a8f949548c8b968d94631a19bea is 50, key is test_row_0/B:col10/1733844979251/Put/seqid=0 2024-12-10T15:36:20,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741926_1102 (size=13289) 2024-12-10T15:36:20,538 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/5515e7b623cf49b097eb99ab834f01bd as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/5515e7b623cf49b097eb99ab834f01bd 2024-12-10T15:36:20,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741927_1103 (size=13289) 2024-12-10T15:36:20,552 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/939e0c332d064ab8ab2dbdc6bcc66af7 is 50, key is test_row_0/A:col10/1733844980455/Put/seqid=0 2024-12-10T15:36:20,569 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x736f1673 to 127.0.0.1:56346 2024-12-10T15:36:20,569 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:36:20,581 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/0fea0a8f949548c8b968d94631a19bea as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/0fea0a8f949548c8b968d94631a19bea 2024-12-10T15:36:20,583 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8a8b39bcfc5042b2f61256808771f62a/A of 8a8b39bcfc5042b2f61256808771f62a into 5515e7b623cf49b097eb99ab834f01bd(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:36:20,583 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:20,583 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a., storeName=8a8b39bcfc5042b2f61256808771f62a/A, priority=12, startTime=1733844980408; duration=0sec 2024-12-10T15:36:20,583 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:20,583 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8a8b39bcfc5042b2f61256808771f62a:A 2024-12-10T15:36:20,583 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:36:20,586 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50056 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:36:20,586 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 8a8b39bcfc5042b2f61256808771f62a/C is initiating minor compaction (all files) 2024-12-10T15:36:20,586 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8a8b39bcfc5042b2f61256808771f62a/C in TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:20,586 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/c0134fb20b8d4415a25c8edd4ac47d96, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/5d2e052a59c14d24b5ab4aa2f9a81fa0, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/77edd795976648a080ee5d7af3d7a7cd, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/08f255dc8e6540008e311f768884fd74] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp, totalSize=48.9 K 2024-12-10T15:36:20,587 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting c0134fb20b8d4415a25c8edd4ac47d96, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1733844976312 2024-12-10T15:36:20,589 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 5d2e052a59c14d24b5ab4aa2f9a81fa0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1733844976969 2024-12-10T15:36:20,589 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 77edd795976648a080ee5d7af3d7a7cd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=412, earliestPutTs=1733844977765 2024-12-10T15:36:20,590 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 08f255dc8e6540008e311f768884fd74, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=423, earliestPutTs=1733844978937 2024-12-10T15:36:20,600 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8a8b39bcfc5042b2f61256808771f62a/B of 8a8b39bcfc5042b2f61256808771f62a into 0fea0a8f949548c8b968d94631a19bea(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:36:20,600 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:20,600 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a., storeName=8a8b39bcfc5042b2f61256808771f62a/B, priority=12, startTime=1733844980408; duration=0sec 2024-12-10T15:36:20,600 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:20,600 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8a8b39bcfc5042b2f61256808771f62a:B 2024-12-10T15:36:20,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741928_1104 (size=12301) 2024-12-10T15:36:20,679 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8a8b39bcfc5042b2f61256808771f62a#C#compaction#90 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:20,680 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/fa049457f6194281bef20055455b1831 is 50, key is test_row_0/C:col10/1733844979251/Put/seqid=0 2024-12-10T15:36:20,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741929_1105 (size=13289) 2024-12-10T15:36:21,018 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=453 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/939e0c332d064ab8ab2dbdc6bcc66af7 2024-12-10T15:36:21,025 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/0925f3fd200d46218f9404205ee1f712 is 50, key is test_row_0/B:col10/1733844980455/Put/seqid=0 2024-12-10T15:36:21,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741930_1106 (size=12301) 2024-12-10T15:36:21,112 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/fa049457f6194281bef20055455b1831 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/fa049457f6194281bef20055455b1831 2024-12-10T15:36:21,122 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8a8b39bcfc5042b2f61256808771f62a/C of 8a8b39bcfc5042b2f61256808771f62a into fa049457f6194281bef20055455b1831(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
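The three minor compactions above (stores A, B and C of region 8a8b39bcfc5042b2f61256808771f62a, each selecting 4 eligible files via ExploringCompactionPolicy) and the preceding flush are initiated by the region server itself; the same operations can also be requested explicitly through the public Admin API. A minimal sketch, assuming an HBase client on the classpath and the ZooKeeper endpoint seen in the log (127.0.0.1:56346); the class name FlushAndCompactExample is illustrative and not part of the test:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // ZooKeeper endpoint as it appears in the log; adjust for a real cluster.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "56346");

    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Corresponds to the FLUSH table procedure (procId: 22) in the log.
      admin.flush(table);
      // Ask for a compaction of all column families (A, B, C); the region
      // server decides whether it runs as a minor or major compaction.
      admin.compact(table);
    }
  }
}
```

Which files the policy actually picks is governed by the store compaction settings (for example hbase.hstore.compaction.min, hbase.hstore.compaction.max and hbase.hstore.compaction.ratio), not by the Admin call itself.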
2024-12-10T15:36:21,122 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:21,122 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a., storeName=8a8b39bcfc5042b2f61256808771f62a/C, priority=12, startTime=1733844980408; duration=0sec 2024-12-10T15:36:21,122 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:21,122 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8a8b39bcfc5042b2f61256808771f62a:C 2024-12-10T15:36:21,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-10T15:36:21,172 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-12-10T15:36:21,430 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=453 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/0925f3fd200d46218f9404205ee1f712 2024-12-10T15:36:21,439 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/d5276aafaadb4eaba137f2dac02af3a0 is 50, key is test_row_0/C:col10/1733844980455/Put/seqid=0 2024-12-10T15:36:21,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741931_1107 (size=12301) 2024-12-10T15:36:21,850 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=453 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/d5276aafaadb4eaba137f2dac02af3a0 2024-12-10T15:36:21,856 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/939e0c332d064ab8ab2dbdc6bcc66af7 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/939e0c332d064ab8ab2dbdc6bcc66af7 2024-12-10T15:36:21,860 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/939e0c332d064ab8ab2dbdc6bcc66af7, entries=150, sequenceid=453, filesize=12.0 K 2024-12-10T15:36:21,861 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/0925f3fd200d46218f9404205ee1f712 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/0925f3fd200d46218f9404205ee1f712 2024-12-10T15:36:21,866 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/0925f3fd200d46218f9404205ee1f712, entries=150, sequenceid=453, filesize=12.0 K 2024-12-10T15:36:21,867 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/d5276aafaadb4eaba137f2dac02af3a0 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/d5276aafaadb4eaba137f2dac02af3a0 2024-12-10T15:36:21,871 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/d5276aafaadb4eaba137f2dac02af3a0, entries=150, sequenceid=453, filesize=12.0 K 2024-12-10T15:36:21,872 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=6.71 KB/6870 for 8a8b39bcfc5042b2f61256808771f62a in 1412ms, sequenceid=453, compaction requested=false 2024-12-10T15:36:21,872 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:21,954 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x79d38d10 to 127.0.0.1:56346 2024-12-10T15:36:21,954 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:36:21,955 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-10T15:36:21,955 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 69 2024-12-10T15:36:21,955 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 84 2024-12-10T15:36:21,955 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 61 2024-12-10T15:36:21,955 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 77 2024-12-10T15:36:21,955 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 73 2024-12-10T15:36:21,955 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-10T15:36:21,955 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3182 2024-12-10T15:36:21,955 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3085 2024-12-10T15:36:21,955 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-10T15:36:21,955 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1233 2024-12-10T15:36:21,955 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3699 rows 2024-12-10T15:36:21,955 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1244 2024-12-10T15:36:21,955 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3732 rows 2024-12-10T15:36:21,955 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-10T15:36:21,955 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2e67f019 to 127.0.0.1:56346 2024-12-10T15:36:21,955 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:36:21,957 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-10T15:36:21,961 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-10T15:36:21,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-10T15:36:21,969 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733844981969"}]},"ts":"1733844981969"} 2024-12-10T15:36:21,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-10T15:36:21,970 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-10T15:36:22,030 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-10T15:36:22,032 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-10T15:36:22,036 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8a8b39bcfc5042b2f61256808771f62a, UNASSIGN}] 2024-12-10T15:36:22,037 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=8a8b39bcfc5042b2f61256808771f62a, UNASSIGN 2024-12-10T15:36:22,037 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=26 updating hbase:meta row=8a8b39bcfc5042b2f61256808771f62a, regionState=CLOSING, regionLocation=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:22,038 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T15:36:22,039 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; CloseRegionProcedure 8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049}] 2024-12-10T15:36:22,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-10T15:36:22,194 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:22,195 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(124): Close 8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:36:22,196 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T15:36:22,196 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1681): Closing 8a8b39bcfc5042b2f61256808771f62a, disabling compactions & flushes 2024-12-10T15:36:22,196 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:22,196 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:22,196 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. after waiting 0 ms 2024-12-10T15:36:22,196 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 
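The pid=24 → 25 → 26 → 27 chain above is the procedure tree the master builds when a client disables the table: DisableTableProcedure schedules a CloseTableRegionsProcedure, which transitions the single region to CLOSING and dispatches a CloseRegionProcedure to the region server. On the client side the whole tree is started by one Admin call; a minimal sketch, assuming default client configuration (DisableTableExample is an illustrative name):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      if (admin.isTableEnabled(table)) {
        // Blocks until the DisableTableProcedure and its subprocedures
        // (CloseTableRegions / TransitRegionState / CloseRegion) complete.
        admin.disableTable(table);
      }
      // A disabled table can then be re-enabled or dropped:
      // admin.enableTable(table);  or  admin.deleteTable(table);
    }
  }
}
```

Because disableTable waits for the procedure to finish, the client keeps polling the master, which is why the log shows repeated "Checking to see if procedure is done pid=24" entries from the RPC handler.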
2024-12-10T15:36:22,196 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(2837): Flushing 8a8b39bcfc5042b2f61256808771f62a 3/3 column families, dataSize=13.42 KB heapSize=35.91 KB 2024-12-10T15:36:22,196 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=A 2024-12-10T15:36:22,197 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:22,197 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=B 2024-12-10T15:36:22,197 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:22,197 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8a8b39bcfc5042b2f61256808771f62a, store=C 2024-12-10T15:36:22,197 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:22,201 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/0522e763642a4b24a55ba58814ccd9e9 is 50, key is test_row_0/A:col10/1733844980548/Put/seqid=0 2024-12-10T15:36:22,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741932_1108 (size=9857) 2024-12-10T15:36:22,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-10T15:36:22,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-10T15:36:22,607 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=461 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/0522e763642a4b24a55ba58814ccd9e9 2024-12-10T15:36:22,616 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/b49db6ec600e401b9c70452708baafeb is 50, key is test_row_0/B:col10/1733844980548/Put/seqid=0 2024-12-10T15:36:22,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741933_1109 (size=9857) 2024-12-10T15:36:23,036 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 
{event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=461 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/b49db6ec600e401b9c70452708baafeb 2024-12-10T15:36:23,045 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/ac5afaa82a2e4136bbf1b5747196a181 is 50, key is test_row_0/C:col10/1733844980548/Put/seqid=0 2024-12-10T15:36:23,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741934_1110 (size=9857) 2024-12-10T15:36:23,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-10T15:36:23,449 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=461 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/ac5afaa82a2e4136bbf1b5747196a181 2024-12-10T15:36:23,454 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/A/0522e763642a4b24a55ba58814ccd9e9 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/0522e763642a4b24a55ba58814ccd9e9 2024-12-10T15:36:23,460 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/0522e763642a4b24a55ba58814ccd9e9, entries=100, sequenceid=461, filesize=9.6 K 2024-12-10T15:36:23,461 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/B/b49db6ec600e401b9c70452708baafeb as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/b49db6ec600e401b9c70452708baafeb 2024-12-10T15:36:23,471 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/b49db6ec600e401b9c70452708baafeb, entries=100, sequenceid=461, filesize=9.6 K 2024-12-10T15:36:23,472 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/.tmp/C/ac5afaa82a2e4136bbf1b5747196a181 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/ac5afaa82a2e4136bbf1b5747196a181 2024-12-10T15:36:23,483 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/ac5afaa82a2e4136bbf1b5747196a181, entries=100, sequenceid=461, filesize=9.6 K 2024-12-10T15:36:23,495 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~13.42 KB/13740, heapSize ~35.86 KB/36720, currentSize=0 B/0 for 8a8b39bcfc5042b2f61256808771f62a in 1298ms, sequenceid=461, compaction requested=true 2024-12-10T15:36:23,495 DEBUG [StoreCloser-TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/c259330c349c4dac95a47ad661d2af66, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/215d7470eb4d4386a919b1e4de1bd1e5, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/bfcb8760bf004bf19c2e0ef1098c76ed, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/1514479717e546ec9e871cb3dc2e3f76, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/7e9b103fb147492b801527e4794f93f0, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/9ab13e04db2c4ee5993d50ae5182b87f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/42055f6d57514beaab36fa87869d5c3e, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/4cde373839f4420c9aa557463393c90f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/8dc4352d04144ef89966263faac542f1, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/df83c5e2147c417390f5fc9d029a05bb, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/8c5fbb4122c34975b4be2ef24d4e92de, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/b6e4d11daa974b5f97b7d4f33774107d, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/99d2b0c7a6b74dbfbebc14d7c1c8d139, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/ba9f8e2fcd6b4e06bfcb212d1048dd9a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/aaa1dbc19d78443aa4045b15a5ad174b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/55c0f9122e4143658bf3aba0351df8f3, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/b2a51dadbe19456d80c2a3ac042b768c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/6df7be84f5be49ee924a1e0ec89ec64b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/25b31a0525ea4bc692380663230da6ad, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/67d6c15acdad4f17992e99de7e7c9558, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/5d3a632119074d94a554e773ccfc8f7c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/fe93889a54a34b28b9055d4f96ffd28b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/b310e5f79c914b30a5410e110d049eec, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/bd08306b93184e6ab9e9d8aab1691bbf, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/e4700be553614010b257434067afd0b9, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/3c2e6a21ff4240a9872cbff8eed1b24f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/81120c39f0ef4a34a68617e354d89976, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/2d2751e316b244e982689d8fcbc07498, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/04a4d8cdd35a44f4bd91368780581fd1] to archive 2024-12-10T15:36:23,500 DEBUG [StoreCloser-TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
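When the store closes, HStore hands every store file that is no longer live (including the inputs of the compactions above) to HFileArchiver, which re-creates the file's relative path under the cluster's archive directory instead of deleting it outright. A minimal sketch of that data/ → archive/ path mapping, using a hypothetical helper (toArchivePath) with the root directory and one file name taken from the log; the real archiver in org.apache.hadoop.hbase.backup.HFileArchiver additionally handles name collisions and retries:

```java
import org.apache.hadoop.fs.Path;

public final class ArchivePathExample {
  // Hypothetical helper mirroring the mapping visible in the archiver log
  // lines: <root>/data/<ns>/<table>/<region>/<cf>/<hfile>
  //     -> <root>/archive/data/<ns>/<table>/<region>/<cf>/<hfile>
  static Path toArchivePath(Path rootDir, Path storeFile) {
    String relative = storeFile.toUri().getPath()
        .substring(rootDir.toUri().getPath().length() + 1);
    return new Path(new Path(rootDir, "archive"), relative);
  }

  public static void main(String[] args) {
    Path root = new Path(
        "hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935");
    Path hfile = new Path(root,
        "data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/c259330c349c4dac95a47ad661d2af66");
    // Prints the same archive location the HFileArchiver reports below.
    System.out.println(toArchivePath(root, hfile));
  }
}
```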
2024-12-10T15:36:23,524 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/c259330c349c4dac95a47ad661d2af66 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/c259330c349c4dac95a47ad661d2af66 2024-12-10T15:36:23,524 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/215d7470eb4d4386a919b1e4de1bd1e5 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/215d7470eb4d4386a919b1e4de1bd1e5 2024-12-10T15:36:23,525 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/bfcb8760bf004bf19c2e0ef1098c76ed to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/bfcb8760bf004bf19c2e0ef1098c76ed 2024-12-10T15:36:23,525 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/7e9b103fb147492b801527e4794f93f0 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/7e9b103fb147492b801527e4794f93f0 2024-12-10T15:36:23,525 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/1514479717e546ec9e871cb3dc2e3f76 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/1514479717e546ec9e871cb3dc2e3f76 2024-12-10T15:36:23,529 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/9ab13e04db2c4ee5993d50ae5182b87f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/9ab13e04db2c4ee5993d50ae5182b87f 2024-12-10T15:36:23,535 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/8dc4352d04144ef89966263faac542f1 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/8dc4352d04144ef89966263faac542f1 2024-12-10T15:36:23,536 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/b6e4d11daa974b5f97b7d4f33774107d to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/b6e4d11daa974b5f97b7d4f33774107d 2024-12-10T15:36:23,536 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/8c5fbb4122c34975b4be2ef24d4e92de to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/8c5fbb4122c34975b4be2ef24d4e92de 2024-12-10T15:36:23,536 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/ba9f8e2fcd6b4e06bfcb212d1048dd9a to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/ba9f8e2fcd6b4e06bfcb212d1048dd9a 2024-12-10T15:36:23,537 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/99d2b0c7a6b74dbfbebc14d7c1c8d139 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/99d2b0c7a6b74dbfbebc14d7c1c8d139 2024-12-10T15:36:23,537 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/df83c5e2147c417390f5fc9d029a05bb to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/df83c5e2147c417390f5fc9d029a05bb 2024-12-10T15:36:23,539 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/aaa1dbc19d78443aa4045b15a5ad174b to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/aaa1dbc19d78443aa4045b15a5ad174b 2024-12-10T15:36:23,539 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/55c0f9122e4143658bf3aba0351df8f3 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/55c0f9122e4143658bf3aba0351df8f3 2024-12-10T15:36:23,539 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/4cde373839f4420c9aa557463393c90f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/4cde373839f4420c9aa557463393c90f 2024-12-10T15:36:23,541 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/25b31a0525ea4bc692380663230da6ad to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/25b31a0525ea4bc692380663230da6ad 2024-12-10T15:36:23,541 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/67d6c15acdad4f17992e99de7e7c9558 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/67d6c15acdad4f17992e99de7e7c9558 2024-12-10T15:36:23,541 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/b2a51dadbe19456d80c2a3ac042b768c to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/b2a51dadbe19456d80c2a3ac042b768c 2024-12-10T15:36:23,541 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/42055f6d57514beaab36fa87869d5c3e to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/42055f6d57514beaab36fa87869d5c3e 2024-12-10T15:36:23,541 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/6df7be84f5be49ee924a1e0ec89ec64b to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/6df7be84f5be49ee924a1e0ec89ec64b 2024-12-10T15:36:23,543 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/fe93889a54a34b28b9055d4f96ffd28b to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/fe93889a54a34b28b9055d4f96ffd28b 2024-12-10T15:36:23,543 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/5d3a632119074d94a554e773ccfc8f7c to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/5d3a632119074d94a554e773ccfc8f7c 2024-12-10T15:36:23,544 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/b310e5f79c914b30a5410e110d049eec to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/b310e5f79c914b30a5410e110d049eec 2024-12-10T15:36:23,545 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/bd08306b93184e6ab9e9d8aab1691bbf to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/bd08306b93184e6ab9e9d8aab1691bbf 2024-12-10T15:36:23,547 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/e4700be553614010b257434067afd0b9 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/e4700be553614010b257434067afd0b9 2024-12-10T15:36:23,547 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/3c2e6a21ff4240a9872cbff8eed1b24f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/3c2e6a21ff4240a9872cbff8eed1b24f 2024-12-10T15:36:23,547 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/2d2751e316b244e982689d8fcbc07498 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/2d2751e316b244e982689d8fcbc07498 2024-12-10T15:36:23,547 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/81120c39f0ef4a34a68617e354d89976 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/81120c39f0ef4a34a68617e354d89976 2024-12-10T15:36:23,552 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/04a4d8cdd35a44f4bd91368780581fd1 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/04a4d8cdd35a44f4bd91368780581fd1 2024-12-10T15:36:23,566 DEBUG [StoreCloser-TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/23125e145e97451f8644260d1a3ac40f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/d385beb5cc0443d1b0c2dffbe92c1f39, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/4cd421036f114eeda8c43242954b9678, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/0896aa30d6b644d0bd5476003903c8f1, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/ae36353a9eec4e9d8c140cf07accbc8e, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/ff76886239524a28a6757028e9107208, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/8e1241f531bf4cedb3149e626f1019d6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/f3b06b1cdc4b44b0ad109737958e1e1a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/8180703620764a2bae5413ef6afd4562, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/5ee1c9642f894d1a9fe94d46c09b4912, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/dbc9fba4a8e9464dbc9f4acc5173eb3a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/66bad501625346e8aac1f898fc904eed, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/980d95d1de904d00909672a2a45e569f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/3002b2c23bf14c5fb08b7c3be7a85170, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/a9dc47a581aa412b9c7715c221e54916, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/435039e07c0a43999fc38a461dc30736, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/b9b806444c8e4829bd3633820c3f3ea5, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/fd75d6a258ee4bcb905d9781bea9fe61, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/d0079a37a9914bfba7062d884c0d1f0c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/5769f6613ca14cf69ea2977854d43cc7, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/0d0d0c2fcdaa454a8f0ba04e689d93d6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/1e9f8f87075e4ba98558432f6be91975, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/2240596fdc5b436692f9aac8e529831b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/5a579849a02c4e98a703d13f240b046d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/f62b72b2f8cb44e9b006af231dcda4c8, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/b1b9fad221b0400da619cce42383b9ea, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/49231475c651472caf09af9e3057be57, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/2c90dc4ef7774705a125df1cec5c8c12, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/7a3c9b46fff24fb3abbb3d428b5821c5] to archive 2024-12-10T15:36:23,567 DEBUG [StoreCloser-TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-10T15:36:23,570 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/23125e145e97451f8644260d1a3ac40f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/23125e145e97451f8644260d1a3ac40f 2024-12-10T15:36:23,571 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/d385beb5cc0443d1b0c2dffbe92c1f39 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/d385beb5cc0443d1b0c2dffbe92c1f39 2024-12-10T15:36:23,573 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/4cd421036f114eeda8c43242954b9678 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/4cd421036f114eeda8c43242954b9678 2024-12-10T15:36:23,575 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/ae36353a9eec4e9d8c140cf07accbc8e to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/ae36353a9eec4e9d8c140cf07accbc8e 2024-12-10T15:36:23,575 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/ff76886239524a28a6757028e9107208 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/ff76886239524a28a6757028e9107208 2024-12-10T15:36:23,575 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/0896aa30d6b644d0bd5476003903c8f1 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/0896aa30d6b644d0bd5476003903c8f1 2024-12-10T15:36:23,575 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/5ee1c9642f894d1a9fe94d46c09b4912 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/5ee1c9642f894d1a9fe94d46c09b4912 2024-12-10T15:36:23,576 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/8180703620764a2bae5413ef6afd4562 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/8180703620764a2bae5413ef6afd4562 2024-12-10T15:36:23,576 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/f3b06b1cdc4b44b0ad109737958e1e1a to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/f3b06b1cdc4b44b0ad109737958e1e1a 2024-12-10T15:36:23,576 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/dbc9fba4a8e9464dbc9f4acc5173eb3a to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/dbc9fba4a8e9464dbc9f4acc5173eb3a 2024-12-10T15:36:23,577 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/8e1241f531bf4cedb3149e626f1019d6 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/8e1241f531bf4cedb3149e626f1019d6 2024-12-10T15:36:23,577 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/980d95d1de904d00909672a2a45e569f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/980d95d1de904d00909672a2a45e569f 2024-12-10T15:36:23,577 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/66bad501625346e8aac1f898fc904eed to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/66bad501625346e8aac1f898fc904eed 2024-12-10T15:36:23,577 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/435039e07c0a43999fc38a461dc30736 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/435039e07c0a43999fc38a461dc30736 2024-12-10T15:36:23,578 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/fd75d6a258ee4bcb905d9781bea9fe61 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/fd75d6a258ee4bcb905d9781bea9fe61 2024-12-10T15:36:23,578 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/a9dc47a581aa412b9c7715c221e54916 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/a9dc47a581aa412b9c7715c221e54916 2024-12-10T15:36:23,579 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/3002b2c23bf14c5fb08b7c3be7a85170 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/3002b2c23bf14c5fb08b7c3be7a85170 2024-12-10T15:36:23,580 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/0d0d0c2fcdaa454a8f0ba04e689d93d6 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/0d0d0c2fcdaa454a8f0ba04e689d93d6 2024-12-10T15:36:23,580 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/d0079a37a9914bfba7062d884c0d1f0c to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/d0079a37a9914bfba7062d884c0d1f0c 2024-12-10T15:36:23,580 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/5769f6613ca14cf69ea2977854d43cc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/5769f6613ca14cf69ea2977854d43cc7 2024-12-10T15:36:23,580 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/1e9f8f87075e4ba98558432f6be91975 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/1e9f8f87075e4ba98558432f6be91975 2024-12-10T15:36:23,581 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/b9b806444c8e4829bd3633820c3f3ea5 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/b9b806444c8e4829bd3633820c3f3ea5 2024-12-10T15:36:23,582 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/2240596fdc5b436692f9aac8e529831b to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/2240596fdc5b436692f9aac8e529831b 2024-12-10T15:36:23,582 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/5a579849a02c4e98a703d13f240b046d to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/5a579849a02c4e98a703d13f240b046d 2024-12-10T15:36:23,583 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/b1b9fad221b0400da619cce42383b9ea to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/b1b9fad221b0400da619cce42383b9ea 2024-12-10T15:36:23,583 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/f62b72b2f8cb44e9b006af231dcda4c8 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/f62b72b2f8cb44e9b006af231dcda4c8 2024-12-10T15:36:23,583 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/49231475c651472caf09af9e3057be57 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/49231475c651472caf09af9e3057be57 2024-12-10T15:36:23,583 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/2c90dc4ef7774705a125df1cec5c8c12 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/2c90dc4ef7774705a125df1cec5c8c12 2024-12-10T15:36:23,583 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/7a3c9b46fff24fb3abbb3d428b5821c5 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/7a3c9b46fff24fb3abbb3d428b5821c5 2024-12-10T15:36:23,584 DEBUG [StoreCloser-TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/6a319f6f1c1544c58f26864732f1f9d4, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/adf65e92515a4f56898699def1547296, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/5b92018f495b4d738f208cc8c93e60b0, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/8d0c56f0e4e94467b1c9ae8703309df4, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/2353915cefea441483bb9bd5a03b4039, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/7e0a43ad2ecd4b6793bebb85cd4b548e, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/113db005c9f74dceb1bcf77e86ee108c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/0596c9dbb82b4948b3ef0dc161600005, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/cf88f156cd174bc99b9f64060567d66e, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/40bac7771a2f4546b2914353fa630382, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/0a18acf710b84d619e09f15fbf0e5edb, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/e918baba87d14cddb5d621cafebaa397, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/54acb781a713458d8e13839406e7743b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/e55c6e0436fb4637b9e6ec9855d60ee4, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/e7515db364b641b68c67805b2745d46b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/bcd4abac354548c4be685e635550f2ab, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/69bc187f77604f819b958c1916b9eb98, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/1a300140b9794e87a91a0ea289ef81d8, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/6b4ceff5c14f44de97ca796edb9c1c60, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/07b53fd4094d48a7b365f3dde238a8bd, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/5f4de534ceaf47c192ed83812985c626, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/cca559971a31474da0d233e129dfc3a1, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/4ab9750083cf4c7b91e338e3110ac86d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/28911189c55e4594b7032240582de6ad, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/c0134fb20b8d4415a25c8edd4ac47d96, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/7b0344fc18164c1ab5639ec92b599f12, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/5d2e052a59c14d24b5ab4aa2f9a81fa0, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/77edd795976648a080ee5d7af3d7a7cd, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/08f255dc8e6540008e311f768884fd74] to archive 2024-12-10T15:36:23,585 DEBUG [StoreCloser-TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-10T15:36:23,588 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/8d0c56f0e4e94467b1c9ae8703309df4 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/8d0c56f0e4e94467b1c9ae8703309df4 2024-12-10T15:36:23,589 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/0596c9dbb82b4948b3ef0dc161600005 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/0596c9dbb82b4948b3ef0dc161600005 2024-12-10T15:36:23,589 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/113db005c9f74dceb1bcf77e86ee108c to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/113db005c9f74dceb1bcf77e86ee108c 2024-12-10T15:36:23,589 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/adf65e92515a4f56898699def1547296 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/adf65e92515a4f56898699def1547296 2024-12-10T15:36:23,589 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/7e0a43ad2ecd4b6793bebb85cd4b548e to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/7e0a43ad2ecd4b6793bebb85cd4b548e 2024-12-10T15:36:23,590 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/6a319f6f1c1544c58f26864732f1f9d4 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/6a319f6f1c1544c58f26864732f1f9d4 2024-12-10T15:36:23,590 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/5b92018f495b4d738f208cc8c93e60b0 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/5b92018f495b4d738f208cc8c93e60b0 2024-12-10T15:36:23,591 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/2353915cefea441483bb9bd5a03b4039 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/2353915cefea441483bb9bd5a03b4039 2024-12-10T15:36:23,591 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/e918baba87d14cddb5d621cafebaa397 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/e918baba87d14cddb5d621cafebaa397 2024-12-10T15:36:23,591 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/0a18acf710b84d619e09f15fbf0e5edb to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/0a18acf710b84d619e09f15fbf0e5edb 2024-12-10T15:36:23,591 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/40bac7771a2f4546b2914353fa630382 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/40bac7771a2f4546b2914353fa630382 2024-12-10T15:36:23,592 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/cf88f156cd174bc99b9f64060567d66e to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/cf88f156cd174bc99b9f64060567d66e 2024-12-10T15:36:23,593 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/54acb781a713458d8e13839406e7743b to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/54acb781a713458d8e13839406e7743b 2024-12-10T15:36:23,594 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/e7515db364b641b68c67805b2745d46b to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/e7515db364b641b68c67805b2745d46b 2024-12-10T15:36:23,594 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/1a300140b9794e87a91a0ea289ef81d8 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/1a300140b9794e87a91a0ea289ef81d8 2024-12-10T15:36:23,594 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/6b4ceff5c14f44de97ca796edb9c1c60 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/6b4ceff5c14f44de97ca796edb9c1c60 2024-12-10T15:36:23,595 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/69bc187f77604f819b958c1916b9eb98 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/69bc187f77604f819b958c1916b9eb98 2024-12-10T15:36:23,595 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/07b53fd4094d48a7b365f3dde238a8bd to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/07b53fd4094d48a7b365f3dde238a8bd 2024-12-10T15:36:23,595 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/e55c6e0436fb4637b9e6ec9855d60ee4 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/e55c6e0436fb4637b9e6ec9855d60ee4 2024-12-10T15:36:23,595 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/bcd4abac354548c4be685e635550f2ab to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/bcd4abac354548c4be685e635550f2ab 2024-12-10T15:36:23,597 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/28911189c55e4594b7032240582de6ad to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/28911189c55e4594b7032240582de6ad 2024-12-10T15:36:23,597 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/c0134fb20b8d4415a25c8edd4ac47d96 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/c0134fb20b8d4415a25c8edd4ac47d96 2024-12-10T15:36:23,597 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/4ab9750083cf4c7b91e338e3110ac86d to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/4ab9750083cf4c7b91e338e3110ac86d 2024-12-10T15:36:23,598 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/7b0344fc18164c1ab5639ec92b599f12 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/7b0344fc18164c1ab5639ec92b599f12 2024-12-10T15:36:23,598 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/5f4de534ceaf47c192ed83812985c626 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/5f4de534ceaf47c192ed83812985c626 2024-12-10T15:36:23,598 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/77edd795976648a080ee5d7af3d7a7cd to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/77edd795976648a080ee5d7af3d7a7cd 2024-12-10T15:36:23,599 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/cca559971a31474da0d233e129dfc3a1 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/cca559971a31474da0d233e129dfc3a1 2024-12-10T15:36:23,599 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/08f255dc8e6540008e311f768884fd74 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/08f255dc8e6540008e311f768884fd74 2024-12-10T15:36:23,600 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/5d2e052a59c14d24b5ab4aa2f9a81fa0 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/5d2e052a59c14d24b5ab4aa2f9a81fa0 2024-12-10T15:36:23,611 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/recovered.edits/464.seqid, newMaxSeqId=464, maxSeqId=1 2024-12-10T15:36:23,617 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a. 2024-12-10T15:36:23,617 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1635): Region close journal for 8a8b39bcfc5042b2f61256808771f62a: 2024-12-10T15:36:23,619 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(170): Closed 8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:36:23,620 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=26 updating hbase:meta row=8a8b39bcfc5042b2f61256808771f62a, regionState=CLOSED 2024-12-10T15:36:23,625 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-12-10T15:36:23,625 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; CloseRegionProcedure 8a8b39bcfc5042b2f61256808771f62a, server=bf0fec90ff6d,46239,1733844953049 in 1.5850 sec 2024-12-10T15:36:23,627 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=26, resume processing ppid=25 2024-12-10T15:36:23,627 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, ppid=25, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=8a8b39bcfc5042b2f61256808771f62a, UNASSIGN in 1.5890 sec 2024-12-10T15:36:23,629 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-12-10T15:36:23,629 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5960 sec 2024-12-10T15:36:23,630 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733844983630"}]},"ts":"1733844983630"} 2024-12-10T15:36:23,632 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-10T15:36:23,639 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-10T15:36:23,641 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.6770 sec 2024-12-10T15:36:24,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-10T15:36:24,074 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: 
default:TestAcidGuarantees, procId: 24 completed 2024-12-10T15:36:24,077 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-10T15:36:24,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:36:24,082 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=28, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:36:24,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-10T15:36:24,083 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=28, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:36:24,085 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:36:24,089 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A, FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B, FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C, FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/recovered.edits] 2024-12-10T15:36:24,093 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/5515e7b623cf49b097eb99ab834f01bd to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/5515e7b623cf49b097eb99ab834f01bd 2024-12-10T15:36:24,093 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/0522e763642a4b24a55ba58814ccd9e9 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/0522e763642a4b24a55ba58814ccd9e9 2024-12-10T15:36:24,094 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/939e0c332d064ab8ab2dbdc6bcc66af7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/A/939e0c332d064ab8ab2dbdc6bcc66af7 2024-12-10T15:36:24,098 DEBUG 
[HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/b49db6ec600e401b9c70452708baafeb to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/b49db6ec600e401b9c70452708baafeb 2024-12-10T15:36:24,099 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/0fea0a8f949548c8b968d94631a19bea to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/0fea0a8f949548c8b968d94631a19bea 2024-12-10T15:36:24,099 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/0925f3fd200d46218f9404205ee1f712 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/B/0925f3fd200d46218f9404205ee1f712 2024-12-10T15:36:24,102 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/d5276aafaadb4eaba137f2dac02af3a0 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/d5276aafaadb4eaba137f2dac02af3a0 2024-12-10T15:36:24,103 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/fa049457f6194281bef20055455b1831 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/fa049457f6194281bef20055455b1831 2024-12-10T15:36:24,103 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/ac5afaa82a2e4136bbf1b5747196a181 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/C/ac5afaa82a2e4136bbf1b5747196a181 2024-12-10T15:36:24,107 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/recovered.edits/464.seqid to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a/recovered.edits/464.seqid 2024-12-10T15:36:24,108 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(634): Deleted 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/8a8b39bcfc5042b2f61256808771f62a 2024-12-10T15:36:24,108 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-10T15:36:24,114 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=28, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:36:24,123 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-10T15:36:24,127 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-10T15:36:24,159 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-10T15:36:24,161 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=28, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:36:24,161 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-10T15:36:24,161 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733844984161"}]},"ts":"9223372036854775807"} 2024-12-10T15:36:24,164 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-10T15:36:24,164 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 8a8b39bcfc5042b2f61256808771f62a, NAME => 'TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a.', STARTKEY => '', ENDKEY => ''}] 2024-12-10T15:36:24,164 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 
2024-12-10T15:36:24,165 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733844984164"}]},"ts":"9223372036854775807"} 2024-12-10T15:36:24,168 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-10T15:36:24,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-10T15:36:24,185 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=28, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:36:24,186 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 107 msec 2024-12-10T15:36:24,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-10T15:36:24,386 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-12-10T15:36:24,401 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=246 (was 218) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_359415945_22 at /127.0.0.1:32826 [Waiting for operation #272] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-65916801_22 at /127.0.0.1:57682 [Waiting for operation #96] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xddc02ee-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;bf0fec90ff6d:46239-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-65916801_22 at /127.0.0.1:60366 [Waiting for operation #245] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xddc02ee-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xddc02ee-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xddc02ee-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=459 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1112 (was 918) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=1232 (was 2169) 2024-12-10T15:36:24,411 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=246, OpenFileDescriptor=459, MaxFileDescriptor=1048576, SystemLoadAverage=1112, ProcessCount=11, AvailableMemoryMB=1232 2024-12-10T15:36:24,413 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
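The TableDescriptorChecker warning above fires because the table descriptor carries a memstore flush size of 131072 bytes (128 KB), well below the usual default. A minimal sketch of how a descriptor could end up with such a value, assuming the stock HBase 2.x TableDescriptorBuilder API (that the test sets a tiny flush size on purpose to force frequent flushes is an assumption, not something stated in the log):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class SmallFlushSizeSketch {
  // Builds a descriptor whose MEMSTORE_FLUSHSIZE (128 KB) is below the sanity
  // threshold, which is what makes TableDescriptorChecker emit the WARN above.
  // Assumption: the real test configures this elsewhere; values here are illustrative.
  public static TableDescriptor descriptor() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setMemStoreFlushSize(128 * 1024L)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("A")))
        .build();
  }
}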
2024-12-10T15:36:24,413 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T15:36:24,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=29, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-10T15:36:24,416 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=29, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T15:36:24,416 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:24,416 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 29 2024-12-10T15:36:24,417 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=29, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T15:36:24,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=29 2024-12-10T15:36:24,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741935_1111 (size=963) 2024-12-10T15:36:24,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=29 2024-12-10T15:36:24,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=29 2024-12-10T15:36:24,828 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935 2024-12-10T15:36:24,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741936_1112 (size=53) 2024-12-10T15:36:25,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=29 2024-12-10T15:36:25,235 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T15:36:25,235 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 94b551ecd4747174537fcd83980a419f, disabling compactions & flushes 2024-12-10T15:36:25,235 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:25,235 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:25,235 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. after waiting 0 ms 2024-12-10T15:36:25,235 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:25,235 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
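The create request logged above (HMaster$4(2389)) spells out the full table descriptor. Built through the public HBase 2.x client API, a roughly equivalent request could look like the sketch below; only the attribute key, table name, and family names are taken from the log, everything else is standard Admin/TableDescriptorBuilder usage with illustrative values.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAcidTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // Table-level attribute seen in the create entry above.
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)      // VERSIONS => '1'
                .setBlocksize(65536)    // BLOCKSIZE => '65536'
                .build());
      }
      // Drives a CreateTableProcedure like pid=29 in the log above.
      admin.createTable(table.build());
    }
  }
}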
2024-12-10T15:36:25,235 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:25,236 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=29, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T15:36:25,236 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733844985236"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733844985236"}]},"ts":"1733844985236"} 2024-12-10T15:36:25,238 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-10T15:36:25,239 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=29, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T15:36:25,239 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733844985239"}]},"ts":"1733844985239"} 2024-12-10T15:36:25,240 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-10T15:36:25,255 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=30, ppid=29, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=94b551ecd4747174537fcd83980a419f, ASSIGN}] 2024-12-10T15:36:25,257 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=30, ppid=29, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=94b551ecd4747174537fcd83980a419f, ASSIGN 2024-12-10T15:36:25,257 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=30, ppid=29, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=94b551ecd4747174537fcd83980a419f, ASSIGN; state=OFFLINE, location=bf0fec90ff6d,46239,1733844953049; forceNewPlan=false, retain=false 2024-12-10T15:36:25,408 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=30 updating hbase:meta row=94b551ecd4747174537fcd83980a419f, regionState=OPENING, regionLocation=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:25,410 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; OpenRegionProcedure 94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049}] 2024-12-10T15:36:25,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=29 2024-12-10T15:36:25,561 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:25,565 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=31}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
2024-12-10T15:36:25,565 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=31}] regionserver.HRegion(7285): Opening region: {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} 2024-12-10T15:36:25,565 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=31}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:25,565 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=31}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T15:36:25,565 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=31}] regionserver.HRegion(7327): checking encryption for 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:25,566 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=31}] regionserver.HRegion(7330): checking classloading for 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:25,567 INFO [StoreOpener-94b551ecd4747174537fcd83980a419f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:25,568 INFO [StoreOpener-94b551ecd4747174537fcd83980a419f-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T15:36:25,568 INFO [StoreOpener-94b551ecd4747174537fcd83980a419f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 94b551ecd4747174537fcd83980a419f columnFamilyName A 2024-12-10T15:36:25,568 DEBUG [StoreOpener-94b551ecd4747174537fcd83980a419f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:25,569 INFO [StoreOpener-94b551ecd4747174537fcd83980a419f-1 {}] regionserver.HStore(327): Store=94b551ecd4747174537fcd83980a419f/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T15:36:25,569 INFO [StoreOpener-94b551ecd4747174537fcd83980a419f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:25,570 INFO [StoreOpener-94b551ecd4747174537fcd83980a419f-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T15:36:25,571 INFO [StoreOpener-94b551ecd4747174537fcd83980a419f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 94b551ecd4747174537fcd83980a419f columnFamilyName B 2024-12-10T15:36:25,571 DEBUG [StoreOpener-94b551ecd4747174537fcd83980a419f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:25,572 INFO [StoreOpener-94b551ecd4747174537fcd83980a419f-1 {}] regionserver.HStore(327): Store=94b551ecd4747174537fcd83980a419f/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T15:36:25,572 INFO [StoreOpener-94b551ecd4747174537fcd83980a419f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:25,573 INFO [StoreOpener-94b551ecd4747174537fcd83980a419f-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T15:36:25,573 INFO [StoreOpener-94b551ecd4747174537fcd83980a419f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 94b551ecd4747174537fcd83980a419f columnFamilyName C 2024-12-10T15:36:25,573 DEBUG [StoreOpener-94b551ecd4747174537fcd83980a419f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:25,574 INFO [StoreOpener-94b551ecd4747174537fcd83980a419f-1 {}] regionserver.HStore(327): Store=94b551ecd4747174537fcd83980a419f/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T15:36:25,574 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=31}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:25,574 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=31}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:25,575 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=31}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:25,576 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=31}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-10T15:36:25,577 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=31}] regionserver.HRegion(1085): writing seq id for 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:25,579 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=31}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T15:36:25,579 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=31}] regionserver.HRegion(1102): Opened 94b551ecd4747174537fcd83980a419f; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74626808, jitterRate=0.11202609539031982}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T15:36:25,580 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=31}] regionserver.HRegion(1001): Region open journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:25,581 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=31}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f., pid=31, masterSystemTime=1733844985561 2024-12-10T15:36:25,582 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=31}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:25,582 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=31}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
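Each of the three stores above is opened as a CompactingMemStore with the ADAPTIVE compactor. At the API level the same policy can also be requested per column family; a sketch assuming the HBase 2.x MemoryCompactionPolicy enum and ColumnFamilyDescriptorBuilder (this mirrors what the log shows, it is not the test's own code):

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class AdaptiveMemstoreSketch {
  // Requests ADAPTIVE in-memory compaction for family "A"; when the region opens,
  // the region server then reports the store as a CompactingMemStore with
  // compactor=ADAPTIVE, as in the StoreOpener lines above.
  public static ColumnFamilyDescriptor familyA() {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
        .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
        .build();
  }
}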
2024-12-10T15:36:25,583 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=30 updating hbase:meta row=94b551ecd4747174537fcd83980a419f, regionState=OPEN, openSeqNum=2, regionLocation=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:25,585 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-12-10T15:36:25,586 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; OpenRegionProcedure 94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 in 175 msec 2024-12-10T15:36:25,587 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=30, resume processing ppid=29 2024-12-10T15:36:25,587 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, ppid=29, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=94b551ecd4747174537fcd83980a419f, ASSIGN in 330 msec 2024-12-10T15:36:25,588 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=29, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T15:36:25,588 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733844985588"}]},"ts":"1733844985588"} 2024-12-10T15:36:25,589 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-10T15:36:25,606 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=29, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T15:36:25,608 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1930 sec 2024-12-10T15:36:26,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=29 2024-12-10T15:36:26,525 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 29 completed 2024-12-10T15:36:26,527 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x26401a5f to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@407e6b5c 2024-12-10T15:36:26,548 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6eb305fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:36:26,553 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:36:26,561 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41932, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:36:26,567 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-10T15:36:26,570 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39768, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-10T15:36:26,577 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-10T15:36:26,578 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T15:36:26,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-10T15:36:26,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741937_1113 (size=999) 2024-12-10T15:36:27,007 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-10T15:36:27,007 INFO [PEWorker-3 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-10T15:36:27,010 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-10T15:36:27,020 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=94b551ecd4747174537fcd83980a419f, REOPEN/MOVE}] 2024-12-10T15:36:27,020 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=94b551ecd4747174537fcd83980a419f, REOPEN/MOVE 2024-12-10T15:36:27,021 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=34 updating hbase:meta row=94b551ecd4747174537fcd83980a419f, regionState=CLOSING, regionLocation=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:27,022 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T15:36:27,022 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE; CloseRegionProcedure 94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049}] 2024-12-10T15:36:27,174 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:27,174 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(124): Close 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:27,174 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T15:36:27,174 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1681): Closing 94b551ecd4747174537fcd83980a419f, disabling compactions & flushes 2024-12-10T15:36:27,175 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:27,175 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:27,175 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. after waiting 0 ms 2024-12-10T15:36:27,175 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
2024-12-10T15:36:27,178 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-10T15:36:27,179 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:27,179 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1635): Region close journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:27,179 WARN [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegionServer(3786): Not adding moved region record: 94b551ecd4747174537fcd83980a419f to self. 2024-12-10T15:36:27,181 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(170): Closed 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:27,181 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=34 updating hbase:meta row=94b551ecd4747174537fcd83980a419f, regionState=CLOSED 2024-12-10T15:36:27,183 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-12-10T15:36:27,183 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; CloseRegionProcedure 94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 in 160 msec 2024-12-10T15:36:27,184 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=34, ppid=33, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=94b551ecd4747174537fcd83980a419f, REOPEN/MOVE; state=CLOSED, location=bf0fec90ff6d,46239,1733844953049; forceNewPlan=false, retain=true 2024-12-10T15:36:27,334 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=34 updating hbase:meta row=94b551ecd4747174537fcd83980a419f, regionState=OPENING, regionLocation=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:27,335 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=34, state=RUNNABLE; OpenRegionProcedure 94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049}] 2024-12-10T15:36:27,433 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-10T15:36:27,434 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39774, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-10T15:36:27,486 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:27,489 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=36}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
2024-12-10T15:36:27,489 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=36}] regionserver.HRegion(7285): Opening region: {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} 2024-12-10T15:36:27,490 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=36}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:27,490 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=36}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T15:36:27,490 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=36}] regionserver.HRegion(7327): checking encryption for 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:27,490 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=36}] regionserver.HRegion(7330): checking classloading for 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:27,493 INFO [StoreOpener-94b551ecd4747174537fcd83980a419f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:27,494 INFO [StoreOpener-94b551ecd4747174537fcd83980a419f-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T15:36:27,499 INFO [StoreOpener-94b551ecd4747174537fcd83980a419f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 94b551ecd4747174537fcd83980a419f columnFamilyName A 2024-12-10T15:36:27,502 DEBUG [StoreOpener-94b551ecd4747174537fcd83980a419f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:27,502 INFO [StoreOpener-94b551ecd4747174537fcd83980a419f-1 {}] regionserver.HStore(327): Store=94b551ecd4747174537fcd83980a419f/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T15:36:27,503 INFO [StoreOpener-94b551ecd4747174537fcd83980a419f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:27,504 INFO [StoreOpener-94b551ecd4747174537fcd83980a419f-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T15:36:27,504 INFO [StoreOpener-94b551ecd4747174537fcd83980a419f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 94b551ecd4747174537fcd83980a419f columnFamilyName B 2024-12-10T15:36:27,504 DEBUG [StoreOpener-94b551ecd4747174537fcd83980a419f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:27,504 INFO [StoreOpener-94b551ecd4747174537fcd83980a419f-1 {}] regionserver.HStore(327): Store=94b551ecd4747174537fcd83980a419f/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T15:36:27,505 INFO [StoreOpener-94b551ecd4747174537fcd83980a419f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:27,505 INFO [StoreOpener-94b551ecd4747174537fcd83980a419f-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T15:36:27,505 INFO [StoreOpener-94b551ecd4747174537fcd83980a419f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 94b551ecd4747174537fcd83980a419f columnFamilyName C 2024-12-10T15:36:27,505 DEBUG [StoreOpener-94b551ecd4747174537fcd83980a419f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:27,506 INFO [StoreOpener-94b551ecd4747174537fcd83980a419f-1 {}] regionserver.HStore(327): Store=94b551ecd4747174537fcd83980a419f/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T15:36:27,506 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=36}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:27,507 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=36}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:27,507 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=36}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:27,509 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=36}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-10T15:36:27,510 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=36}] regionserver.HRegion(1085): writing seq id for 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:27,511 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=36}] regionserver.HRegion(1102): Opened 94b551ecd4747174537fcd83980a419f; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74861729, jitterRate=0.11552669107913971}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T15:36:27,513 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=36}] regionserver.HRegion(1001): Region open journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:27,514 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=36}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f., pid=36, masterSystemTime=1733844987486 2024-12-10T15:36:27,515 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=36}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:27,515 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=36}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
2024-12-10T15:36:27,515 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=34 updating hbase:meta row=94b551ecd4747174537fcd83980a419f, regionState=OPEN, openSeqNum=5, regionLocation=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:27,518 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=36, resume processing ppid=34 2024-12-10T15:36:27,518 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=34, state=SUCCESS; OpenRegionProcedure 94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 in 181 msec 2024-12-10T15:36:27,519 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=34, resume processing ppid=33 2024-12-10T15:36:27,520 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, ppid=33, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=94b551ecd4747174537fcd83980a419f, REOPEN/MOVE in 498 msec 2024-12-10T15:36:27,522 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-12-10T15:36:27,522 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 511 msec 2024-12-10T15:36:27,525 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 943 msec 2024-12-10T15:36:27,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-10T15:36:27,534 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5c820ef9 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7b4bd1ba 2024-12-10T15:36:27,599 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@176c5c1b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:36:27,601 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0e3a4420 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7ebda6ad 2024-12-10T15:36:27,616 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@190853fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:36:27,617 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x42e904d8 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@505d5ccd 2024-12-10T15:36:27,633 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46114993, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:36:27,635 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0a4c53ed to 
127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@367f47f7 2024-12-10T15:36:27,647 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68f0be85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:36:27,649 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x22e911df to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@78cafade 2024-12-10T15:36:27,663 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@152377d4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:36:27,665 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x14c16cd4 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1a52344f 2024-12-10T15:36:27,682 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3448d233, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:36:27,684 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0341384e to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8ba8425 2024-12-10T15:36:27,706 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a11164b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:36:27,708 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x26b120d9 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7af61386 2024-12-10T15:36:27,722 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8a7e1dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:36:27,724 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4c1ec7ee to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@63e87c8 2024-12-10T15:36:27,739 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31a027db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:36:27,751 DEBUG 
[hconnection-0x54365b78-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:36:27,757 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:36:27,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=37, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=37, table=TestAcidGuarantees 2024-12-10T15:36:27,758 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=37, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=37, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:36:27,759 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=37, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=37, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:36:27,759 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=38, ppid=37, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:36:27,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-12-10T15:36:27,767 DEBUG [hconnection-0x3006e540-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:36:27,769 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41936, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:36:27,771 DEBUG [hconnection-0x5b607ff4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:36:27,772 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41942, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:36:27,783 DEBUG [hconnection-0x2c47e95c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:36:27,785 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41958, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:36:27,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:27,789 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 94b551ecd4747174537fcd83980a419f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T15:36:27,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=A 2024-12-10T15:36:27,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:27,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=B 2024-12-10T15:36:27,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; 
before=1, new segment=null 2024-12-10T15:36:27,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=C 2024-12-10T15:36:27,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:27,793 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41962, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:36:27,794 DEBUG [hconnection-0x5b774629-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:36:27,795 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41978, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:36:27,799 DEBUG [hconnection-0x761d908d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:36:27,800 DEBUG [hconnection-0x55c3144e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:36:27,801 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41990, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:36:27,801 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42000, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:36:27,811 DEBUG [hconnection-0x2a49bace-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:36:27,814 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42006, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:36:27,819 DEBUG [hconnection-0x35e09396-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:36:27,820 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42010, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:36:27,848 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:27,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845047837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:27,848 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:27,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845047838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:27,849 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:27,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845047840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:27,853 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:27,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845047849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:27,853 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:27,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845047849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:27,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-12-10T15:36:27,870 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210b20dd74c5287424cbd9da769719df6d0_94b551ecd4747174537fcd83980a419f is 50, key is test_row_0/A:col10/1733844987781/Put/seqid=0 2024-12-10T15:36:27,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741938_1114 (size=12154) 2024-12-10T15:36:27,916 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:27,919 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:27,920 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=38 2024-12-10T15:36:27,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:27,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
as already flushing 2024-12-10T15:36:27,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:27,921 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] handler.RSProcedureHandler(58): pid=38 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:27,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=38 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:27,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=38 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:27,922 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210b20dd74c5287424cbd9da769719df6d0_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210b20dd74c5287424cbd9da769719df6d0_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:27,924 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/92ba4dadfa784e508dabcd24966580b5, store: [table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:27,932 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/92ba4dadfa784e508dabcd24966580b5 is 175, key is test_row_0/A:col10/1733844987781/Put/seqid=0 2024-12-10T15:36:27,954 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:27,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845047951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:27,957 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:27,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845047952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:27,957 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:27,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845047953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:27,957 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:27,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845047955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:27,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:27,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845047955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:27,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741939_1115 (size=30955) 2024-12-10T15:36:27,978 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/92ba4dadfa784e508dabcd24966580b5 2024-12-10T15:36:28,017 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/7344dfe10a8d40e5947d1936abe1c182 is 50, key is test_row_0/B:col10/1733844987781/Put/seqid=0 2024-12-10T15:36:28,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741940_1116 (size=12001) 2024-12-10T15:36:28,063 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/7344dfe10a8d40e5947d1936abe1c182 2024-12-10T15:36:28,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-12-10T15:36:28,078 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:28,080 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=38 2024-12-10T15:36:28,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:28,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
as already flushing 2024-12-10T15:36:28,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:28,081 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=38}] handler.RSProcedureHandler(58): pid=38 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:28,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=38 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:28,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=38 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:28,110 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/90192224fdc149eca30fb8798fa29613 is 50, key is test_row_0/C:col10/1733844987781/Put/seqid=0 2024-12-10T15:36:28,160 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:28,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845048160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:28,161 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:28,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845048160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:28,162 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:28,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845048162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:28,163 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:28,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845048162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:28,164 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:28,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845048163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:28,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741941_1117 (size=12001) 2024-12-10T15:36:28,167 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/90192224fdc149eca30fb8798fa29613 2024-12-10T15:36:28,185 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/92ba4dadfa784e508dabcd24966580b5 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/92ba4dadfa784e508dabcd24966580b5 2024-12-10T15:36:28,193 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/92ba4dadfa784e508dabcd24966580b5, entries=150, sequenceid=15, filesize=30.2 K 2024-12-10T15:36:28,195 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/7344dfe10a8d40e5947d1936abe1c182 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/7344dfe10a8d40e5947d1936abe1c182 2024-12-10T15:36:28,206 
INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/7344dfe10a8d40e5947d1936abe1c182, entries=150, sequenceid=15, filesize=11.7 K 2024-12-10T15:36:28,207 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/90192224fdc149eca30fb8798fa29613 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/90192224fdc149eca30fb8798fa29613 2024-12-10T15:36:28,218 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/90192224fdc149eca30fb8798fa29613, entries=150, sequenceid=15, filesize=11.7 K 2024-12-10T15:36:28,219 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 94b551ecd4747174537fcd83980a419f in 430ms, sequenceid=15, compaction requested=false 2024-12-10T15:36:28,219 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:28,239 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:28,240 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=38 2024-12-10T15:36:28,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
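The surrounding entries all trace the same write-blocking cycle on region 94b551ecd4747174537fcd83980a419f: incoming Mutate calls are rejected with RegionTooBusyException while the region's memstore sits above its blocking limit (512.0 K here), and the MemStoreFlusher / RS_FLUSH_OPERATIONS work drains the A, B and C stores until writes can proceed again. Purely as an illustrative sketch (not the TestAcidGuarantees client itself), a writer could absorb those rejections with its own backoff loop; the table name, row key, column family and qualifier below are taken from the log, while the class name, retry count and sleep values are invented for the example:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

/** Hypothetical writer sketch: back off when a put is rejected because the region's memstore is over its blocking limit. */
public class BackoffWriter {

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some-value"));

      long backoffMs = 100L; // start small, double after every rejection
      for (int attempt = 1; attempt <= 10; attempt++) {
        try {
          table.put(put); // the client has its own retries; what escapes here is often wrapped
          return;         // write accepted
        } catch (IOException e) {
          if (!causedByRegionTooBusy(e)) {
            throw e;      // some other failure, do not mask it
          }
          // Same condition as the log's "Over memstore limit" warnings: wait for the flush to drain.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000L);
        }
      }
      throw new IOException("region still too busy after 10 attempts");
    }
  }

  /** Walks the cause chain, since the exception may arrive wrapped by the client retry machinery. */
  private static boolean causedByRegionTooBusy(Throwable t) {
    for (Throwable cur = t; cur != null; cur = cur.getCause()) {
      if (cur instanceof RegionTooBusyException) {
        return true;
      }
    }
    return false;
  }
}

For reference, the blocking threshold behind these warnings is derived in stock HBase from the region's memstore flush size (hbase.hregion.memstore.flush.size, or a per-table override) multiplied by hbase.hregion.memstore.block.multiplier, so the unusually small 512.0 K limit seen throughout this log presumably reflects test-specific overrides rather than production defaults.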
2024-12-10T15:36:28,243 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.HRegion(2837): Flushing 94b551ecd4747174537fcd83980a419f 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-10T15:36:28,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=A 2024-12-10T15:36:28,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:28,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=B 2024-12-10T15:36:28,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:28,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=C 2024-12-10T15:36:28,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:28,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210f1fffa68adb242e79446d189c585316f_94b551ecd4747174537fcd83980a419f is 50, key is test_row_0/A:col10/1733844987840/Put/seqid=0 2024-12-10T15:36:28,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741942_1118 (size=12154) 2024-12-10T15:36:28,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-12-10T15:36:28,497 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:28,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:28,510 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:28,510 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:28,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845048505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:28,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845048506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:28,513 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:28,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845048509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:28,515 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:28,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845048514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:28,516 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:28,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845048515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:28,618 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:28,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845048614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:28,619 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:28,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845048614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:28,619 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:28,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845048615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:28,620 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:28,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845048618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:28,629 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:28,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845048624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:28,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:28,732 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210f1fffa68adb242e79446d189c585316f_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210f1fffa68adb242e79446d189c585316f_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:28,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/015ca286e51d4659b9a33d836de72cda, store: [table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:28,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/015ca286e51d4659b9a33d836de72cda is 175, key is test_row_0/A:col10/1733844987840/Put/seqid=0 2024-12-10T15:36:28,759 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741943_1119 (size=30955) 2024-12-10T15:36:28,763 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/015ca286e51d4659b9a33d836de72cda 2024-12-10T15:36:28,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/0b64b6ad54684b8abad7cf1ff30163ca is 50, key is test_row_0/B:col10/1733844987840/Put/seqid=0 2024-12-10T15:36:28,825 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:28,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845048821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:28,826 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:28,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845048824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:28,827 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:28,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845048824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:28,823 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:28,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845048821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:28,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741944_1120 (size=12001) 2024-12-10T15:36:28,836 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:28,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845048836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:28,837 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/0b64b6ad54684b8abad7cf1ff30163ca 2024-12-10T15:36:28,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/f62257d602eb4641a9c2e1d5030e6be9 is 50, key is test_row_0/C:col10/1733844987840/Put/seqid=0 2024-12-10T15:36:28,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-12-10T15:36:28,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741945_1121 (size=12001) 2024-12-10T15:36:29,131 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:29,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845049127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:29,132 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:29,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845049131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:29,144 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:29,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845049139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:29,145 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:29,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845049140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:29,148 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:29,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845049138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:29,212 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-10T15:36:29,297 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/f62257d602eb4641a9c2e1d5030e6be9 2024-12-10T15:36:29,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/015ca286e51d4659b9a33d836de72cda as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/015ca286e51d4659b9a33d836de72cda 2024-12-10T15:36:29,365 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/015ca286e51d4659b9a33d836de72cda, entries=150, sequenceid=40, filesize=30.2 K 2024-12-10T15:36:29,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/0b64b6ad54684b8abad7cf1ff30163ca as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/0b64b6ad54684b8abad7cf1ff30163ca 2024-12-10T15:36:29,412 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/0b64b6ad54684b8abad7cf1ff30163ca, entries=150, sequenceid=40, filesize=11.7 K 2024-12-10T15:36:29,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/f62257d602eb4641a9c2e1d5030e6be9 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/f62257d602eb4641a9c2e1d5030e6be9 2024-12-10T15:36:29,452 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/f62257d602eb4641a9c2e1d5030e6be9, entries=150, sequenceid=40, filesize=11.7 K 2024-12-10T15:36:29,463 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=67.09 KB/68700 for 94b551ecd4747174537fcd83980a419f in 1220ms, sequenceid=40, compaction requested=false 2024-12-10T15:36:29,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.HRegion(2538): Flush status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:29,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
2024-12-10T15:36:29,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=38 2024-12-10T15:36:29,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=38 2024-12-10T15:36:29,470 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=38, resume processing ppid=37 2024-12-10T15:36:29,470 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, ppid=37, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7090 sec 2024-12-10T15:36:29,472 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=37, table=TestAcidGuarantees in 1.7140 sec 2024-12-10T15:36:29,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:29,641 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 94b551ecd4747174537fcd83980a419f 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-10T15:36:29,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=A 2024-12-10T15:36:29,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:29,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=B 2024-12-10T15:36:29,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:29,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=C 2024-12-10T15:36:29,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:29,674 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121086efa0e9c30343179c5ca31bea122a49_94b551ecd4747174537fcd83980a419f is 50, key is test_row_0/A:col10/1733844989637/Put/seqid=0 2024-12-10T15:36:29,710 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:29,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845049703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:29,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:29,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845049708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:29,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:29,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845049711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:29,717 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:29,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845049712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:29,717 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:29,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845049714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:29,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741946_1122 (size=12154) 2024-12-10T15:36:29,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:29,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845049813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:29,825 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:29,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845049815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:29,826 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:29,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845049819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:29,826 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:29,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845049820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:29,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:29,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845049820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:29,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-12-10T15:36:29,901 INFO [Thread-585 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 37 completed 2024-12-10T15:36:29,909 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:36:29,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=39, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=39, table=TestAcidGuarantees 2024-12-10T15:36:29,916 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=39, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=39, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:36:29,917 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=39, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=39, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:36:29,917 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:36:29,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-10T15:36:30,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure 
is done pid=39 2024-12-10T15:36:30,029 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:30,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845050027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:30,030 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:30,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845050028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:30,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:30,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845050029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:30,032 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:30,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845050029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:30,033 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:30,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845050032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:30,070 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:30,070 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-10T15:36:30,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:30,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:30,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:30,071 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:30,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:30,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:30,125 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:30,133 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121086efa0e9c30343179c5ca31bea122a49_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121086efa0e9c30343179c5ca31bea122a49_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:30,135 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/7d70817a2bc74d53b67ccce5cf5f22e3, store: [table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:30,135 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/7d70817a2bc74d53b67ccce5cf5f22e3 is 175, key is test_row_0/A:col10/1733844989637/Put/seqid=0 2024-12-10T15:36:30,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741947_1123 (size=30955) 2024-12-10T15:36:30,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-10T15:36:30,231 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:30,232 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-10T15:36:30,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:30,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:30,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
2024-12-10T15:36:30,232 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:30,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:30,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:30,335 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:30,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845050333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:30,338 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:30,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845050334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:30,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:30,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845050334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:30,339 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:30,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845050336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:30,342 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:30,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845050338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:30,387 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:30,388 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-10T15:36:30,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:30,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:30,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:30,388 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:30,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:30,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:30,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-10T15:36:30,544 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=55, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/7d70817a2bc74d53b67ccce5cf5f22e3 2024-12-10T15:36:30,544 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:30,547 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-10T15:36:30,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:30,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:30,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
2024-12-10T15:36:30,548 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:30,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:30,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:30,572 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/43e9d81679ee41279783b1a6ec3adeee is 50, key is test_row_0/B:col10/1733844989637/Put/seqid=0 2024-12-10T15:36:30,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741948_1124 (size=12001) 2024-12-10T15:36:30,701 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:30,703 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-10T15:36:30,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:30,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:30,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
2024-12-10T15:36:30,704 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:30,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:30,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:30,846 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:30,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845050840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:30,849 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:30,849 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:30,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845050845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:30,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845050845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:30,850 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:30,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845050845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:30,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:30,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845050852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:30,856 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:30,863 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-10T15:36:30,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:30,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:30,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:30,865 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:30,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:30,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:30,996 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/43e9d81679ee41279783b1a6ec3adeee 2024-12-10T15:36:31,018 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/aeb9ea2f9f1c4b20917937bc70c0f776 is 50, key is test_row_0/C:col10/1733844989637/Put/seqid=0 2024-12-10T15:36:31,018 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:31,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-10T15:36:31,023 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-10T15:36:31,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:31,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
as already flushing 2024-12-10T15:36:31,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:31,024 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:31,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:31,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:31,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741949_1125 (size=12001) 2024-12-10T15:36:31,057 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/aeb9ea2f9f1c4b20917937bc70c0f776 2024-12-10T15:36:31,065 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/7d70817a2bc74d53b67ccce5cf5f22e3 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/7d70817a2bc74d53b67ccce5cf5f22e3 2024-12-10T15:36:31,075 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/7d70817a2bc74d53b67ccce5cf5f22e3, entries=150, sequenceid=55, filesize=30.2 K 2024-12-10T15:36:31,082 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/43e9d81679ee41279783b1a6ec3adeee as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/43e9d81679ee41279783b1a6ec3adeee 
2024-12-10T15:36:31,090 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/43e9d81679ee41279783b1a6ec3adeee, entries=150, sequenceid=55, filesize=11.7 K 2024-12-10T15:36:31,092 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/aeb9ea2f9f1c4b20917937bc70c0f776 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/aeb9ea2f9f1c4b20917937bc70c0f776 2024-12-10T15:36:31,103 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/aeb9ea2f9f1c4b20917937bc70c0f776, entries=150, sequenceid=55, filesize=11.7 K 2024-12-10T15:36:31,106 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=134.18 KB/137400 for 94b551ecd4747174537fcd83980a419f in 1466ms, sequenceid=55, compaction requested=true 2024-12-10T15:36:31,106 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:31,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 94b551ecd4747174537fcd83980a419f:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:36:31,106 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:31,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:31,107 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:31,107 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 94b551ecd4747174537fcd83980a419f:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:36:31,107 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:31,107 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 94b551ecd4747174537fcd83980a419f:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:36:31,107 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:31,109 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:31,109 DEBUG 
[RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 94b551ecd4747174537fcd83980a419f/A is initiating minor compaction (all files) 2024-12-10T15:36:31,109 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 94b551ecd4747174537fcd83980a419f/A in TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:31,109 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/92ba4dadfa784e508dabcd24966580b5, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/015ca286e51d4659b9a33d836de72cda, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/7d70817a2bc74d53b67ccce5cf5f22e3] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp, totalSize=90.7 K 2024-12-10T15:36:31,109 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:31,110 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. files: [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/92ba4dadfa784e508dabcd24966580b5, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/015ca286e51d4659b9a33d836de72cda, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/7d70817a2bc74d53b67ccce5cf5f22e3] 2024-12-10T15:36:31,111 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 92ba4dadfa784e508dabcd24966580b5, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733844987781 2024-12-10T15:36:31,111 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:31,111 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 94b551ecd4747174537fcd83980a419f/B is initiating minor compaction (all files) 2024-12-10T15:36:31,111 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 94b551ecd4747174537fcd83980a419f/B in TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
2024-12-10T15:36:31,111 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/7344dfe10a8d40e5947d1936abe1c182, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/0b64b6ad54684b8abad7cf1ff30163ca, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/43e9d81679ee41279783b1a6ec3adeee] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp, totalSize=35.2 K 2024-12-10T15:36:31,112 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 015ca286e51d4659b9a33d836de72cda, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733844987838 2024-12-10T15:36:31,112 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 7344dfe10a8d40e5947d1936abe1c182, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733844987781 2024-12-10T15:36:31,112 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7d70817a2bc74d53b67ccce5cf5f22e3, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733844988506 2024-12-10T15:36:31,112 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b64b6ad54684b8abad7cf1ff30163ca, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733844987838 2024-12-10T15:36:31,113 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 43e9d81679ee41279783b1a6ec3adeee, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733844988506 2024-12-10T15:36:31,139 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 94b551ecd4747174537fcd83980a419f#B#compaction#105 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:31,141 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/a3fc0d5a01af4deba6249a2256e68116 is 50, key is test_row_0/B:col10/1733844989637/Put/seqid=0 2024-12-10T15:36:31,142 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:31,160 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121033977f64398b403fad22a68b0760d8b8_94b551ecd4747174537fcd83980a419f store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:31,165 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121033977f64398b403fad22a68b0760d8b8_94b551ecd4747174537fcd83980a419f, store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:31,166 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121033977f64398b403fad22a68b0760d8b8_94b551ecd4747174537fcd83980a419f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:31,179 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:31,180 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-10T15:36:31,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
2024-12-10T15:36:31,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741950_1126 (size=12104) 2024-12-10T15:36:31,183 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2837): Flushing 94b551ecd4747174537fcd83980a419f 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-10T15:36:31,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=A 2024-12-10T15:36:31,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:31,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=B 2024-12-10T15:36:31,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:31,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=C 2024-12-10T15:36:31,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:31,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741951_1127 (size=4469) 2024-12-10T15:36:31,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412108270aa97c2184bcc89f328c90422ce72_94b551ecd4747174537fcd83980a419f is 50, key is test_row_0/A:col10/1733844989707/Put/seqid=0 2024-12-10T15:36:31,254 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 94b551ecd4747174537fcd83980a419f#A#compaction#106 average throughput is 0.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:31,257 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/85929a8524264170982a1d714762312d is 175, key is test_row_0/A:col10/1733844989637/Put/seqid=0 2024-12-10T15:36:31,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741952_1128 (size=12154) 2024-12-10T15:36:31,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,316 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412108270aa97c2184bcc89f328c90422ce72_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108270aa97c2184bcc89f328c90422ce72_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:31,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/df5f54dbc99045f38221f6faf544ffcf, store: [table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:31,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/df5f54dbc99045f38221f6faf544ffcf is 175, key is test_row_0/A:col10/1733844989707/Put/seqid=0 2024-12-10T15:36:31,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741953_1129 (size=31058) 2024-12-10T15:36:31,332 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/85929a8524264170982a1d714762312d as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/85929a8524264170982a1d714762312d 2024-12-10T15:36:31,347 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 94b551ecd4747174537fcd83980a419f/A of 94b551ecd4747174537fcd83980a419f into 85929a8524264170982a1d714762312d(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:36:31,347 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:31,347 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f., storeName=94b551ecd4747174537fcd83980a419f/A, priority=13, startTime=1733844991106; duration=0sec 2024-12-10T15:36:31,347 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:31,347 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 94b551ecd4747174537fcd83980a419f:A 2024-12-10T15:36:31,347 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:31,350 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:31,350 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 94b551ecd4747174537fcd83980a419f/C is initiating minor compaction (all files) 2024-12-10T15:36:31,350 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 94b551ecd4747174537fcd83980a419f/C in TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:31,350 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/90192224fdc149eca30fb8798fa29613, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/f62257d602eb4641a9c2e1d5030e6be9, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/aeb9ea2f9f1c4b20917937bc70c0f776] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp, totalSize=35.2 K 2024-12-10T15:36:31,351 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 90192224fdc149eca30fb8798fa29613, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733844987781 2024-12-10T15:36:31,359 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting f62257d602eb4641a9c2e1d5030e6be9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733844987838 2024-12-10T15:36:31,361 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting aeb9ea2f9f1c4b20917937bc70c0f776, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733844988506 2024-12-10T15:36:31,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46053 is added to blk_1073741954_1130 (size=30955) 2024-12-10T15:36:31,364 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/df5f54dbc99045f38221f6faf544ffcf 2024-12-10T15:36:31,374 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 94b551ecd4747174537fcd83980a419f#C#compaction#108 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:31,375 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/90ffc0c60e29491ca94194b50faa8bd5 is 50, key is test_row_0/C:col10/1733844989637/Put/seqid=0 2024-12-10T15:36:31,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/63c5de94a15941f984ed3c63979e5367 is 50, key is test_row_0/B:col10/1733844989707/Put/seqid=0 2024-12-10T15:36:31,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741955_1131 (size=12104) 2024-12-10T15:36:31,466 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/90ffc0c60e29491ca94194b50faa8bd5 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/90ffc0c60e29491ca94194b50faa8bd5 2024-12-10T15:36:31,476 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 94b551ecd4747174537fcd83980a419f/C of 94b551ecd4747174537fcd83980a419f into 90ffc0c60e29491ca94194b50faa8bd5(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:36:31,476 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:31,478 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f., storeName=94b551ecd4747174537fcd83980a419f/C, priority=13, startTime=1733844991107; duration=0sec 2024-12-10T15:36:31,479 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:31,479 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 94b551ecd4747174537fcd83980a419f:C 2024-12-10T15:36:31,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741956_1132 (size=12001) 2024-12-10T15:36:31,483 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/63c5de94a15941f984ed3c63979e5367 2024-12-10T15:36:31,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/9bbbe77e3f184447b129c86b5067f41b is 50, key is test_row_0/C:col10/1733844989707/Put/seqid=0 2024-12-10T15:36:31,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741957_1133 (size=12001) 2024-12-10T15:36:31,534 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/9bbbe77e3f184447b129c86b5067f41b 2024-12-10T15:36:31,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/df5f54dbc99045f38221f6faf544ffcf as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/df5f54dbc99045f38221f6faf544ffcf 2024-12-10T15:36:31,550 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/df5f54dbc99045f38221f6faf544ffcf, entries=150, sequenceid=78, filesize=30.2 K 2024-12-10T15:36:31,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 
{event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/63c5de94a15941f984ed3c63979e5367 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/63c5de94a15941f984ed3c63979e5367 2024-12-10T15:36:31,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,562 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/63c5de94a15941f984ed3c63979e5367, entries=150, sequenceid=78, filesize=11.7 K 2024-12-10T15:36:31,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/9bbbe77e3f184447b129c86b5067f41b as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/9bbbe77e3f184447b129c86b5067f41b 2024-12-10T15:36:31,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,587 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/9bbbe77e3f184447b129c86b5067f41b, entries=150, sequenceid=78, filesize=11.7 K 2024-12-10T15:36:31,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,599 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=0 B/0 for 94b551ecd4747174537fcd83980a419f in 419ms, sequenceid=78, compaction requested=false 2024-12-10T15:36:31,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=40}] regionserver.HRegion(2538): Flush status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:31,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:31,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=40 2024-12-10T15:36:31,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=40 2024-12-10T15:36:31,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,608 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,615 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,619 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=39 2024-12-10T15:36:31,619 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=39, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6870 sec 2024-12-10T15:36:31,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,622 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/a3fc0d5a01af4deba6249a2256e68116 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/a3fc0d5a01af4deba6249a2256e68116 2024-12-10T15:36:31,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,625 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=39, table=TestAcidGuarantees in 1.7140 sec 
2024-12-10T15:36:31,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,629 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 94b551ecd4747174537fcd83980a419f/B of 94b551ecd4747174537fcd83980a419f into a3fc0d5a01af4deba6249a2256e68116(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:36:31,629 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:31,630 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f., storeName=94b551ecd4747174537fcd83980a419f/B, priority=13, startTime=1733844991106; duration=0sec 2024-12-10T15:36:31,630 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:31,630 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 94b551ecd4747174537fcd83980a419f:B 2024-12-10T15:36:31,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T15:36:31,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T15:36:31,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T15:36:31,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T15:36:31,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T15:36:31,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T15:36:31,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T15:36:31,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T15:36:31,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T15:36:31,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T15:36:31,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T15:36:31,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T15:36:31,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T15:36:31,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T15:36:31,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T15:36:31,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T15:36:31,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T15:36:31,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T15:36:31,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T15:36:31,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T15:36:31,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
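The dense run of "instantiating StoreFileTracker impl ... DefaultStoreFileTracker" entries above is StoreFileTrackerFactory resolving a tracker for each store operation. As a minimal sketch of where that choice comes from, assuming the HBase 2.5+ store-file-tracking configuration key (the key name is an assumption here; verify it against the exact version under test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileTrackerConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // StoreFileTrackerFactory reads this key to decide which tracker to build;
        // "DEFAULT" yields the DefaultStoreFileTracker seen in the entries above.
        // "FILE" (tracking store files in a meta file instead of relying on renames)
        // is the main alternative in recent releases -- key name assumed, not taken
        // from this log.
        conf.set("hbase.store.file-tracker.impl", "DEFAULT");
        System.out.println(conf.get("hbase.store.file-tracker.impl"));
      }
    }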
2024-12-10T15:36:31,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,974 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 94b551ecd4747174537fcd83980a419f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T15:36:31,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=A 2024-12-10T15:36:31,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:31,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=B 2024-12-10T15:36:31,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:31,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=C 2024-12-10T15:36:31,974 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:31,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:31,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,978 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:31,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:32,000 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:32,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:32,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:32,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:32,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:32,004 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210c419d22795e547af9efd63704f81e0f1_94b551ecd4747174537fcd83980a419f is 50, key is test_row_0/A:col10/1733844991970/Put/seqid=0 2024-12-10T15:36:32,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:32,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:32,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:32,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:32,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:32,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:32,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-10T15:36:32,024 INFO [Thread-585 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 39 completed 2024-12-10T15:36:32,028 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] 
master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:36:32,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=41, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees 2024-12-10T15:36:32,029 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=41, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:36:32,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-10T15:36:32,029 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=41, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:36:32,030 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:36:32,060 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:32,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845052039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:32,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741958_1134 (size=21918) 2024-12-10T15:36:32,062 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:32,068 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210c419d22795e547af9efd63704f81e0f1_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210c419d22795e547af9efd63704f81e0f1_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:32,070 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:32,070 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/399128fef0c449a9b56c6dd152f93218, store: [table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:32,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845052059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:32,070 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:32,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845052060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:32,070 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:32,070 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/399128fef0c449a9b56c6dd152f93218 is 175, key is test_row_0/A:col10/1733844991970/Put/seqid=0 2024-12-10T15:36:32,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845052061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:32,071 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:32,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845052064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:32,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741959_1135 (size=65323) 2024-12-10T15:36:32,120 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=92, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/399128fef0c449a9b56c6dd152f93218 2024-12-10T15:36:32,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-10T15:36:32,133 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/8823edb95c054d91acd31a1932d67748 is 50, key is test_row_0/B:col10/1733844991970/Put/seqid=0 2024-12-10T15:36:32,165 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:32,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741960_1136 (size=12001) 2024-12-10T15:36:32,175 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/8823edb95c054d91acd31a1932d67748 2024-12-10T15:36:32,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845052162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:32,182 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:32,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845052171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:32,182 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:32,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845052172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:32,182 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:32,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845052174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:32,183 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:32,186 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-10T15:36:32,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:32,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:32,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:32,186 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:32,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:32,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:32,189 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:32,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845052187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:32,199 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/1e003d2695944244b42eba4f66c8ba44 is 50, key is test_row_0/C:col10/1733844991970/Put/seqid=0 2024-12-10T15:36:32,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741961_1137 (size=12001) 2024-12-10T15:36:32,254 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/1e003d2695944244b42eba4f66c8ba44 2024-12-10T15:36:32,268 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/399128fef0c449a9b56c6dd152f93218 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/399128fef0c449a9b56c6dd152f93218 2024-12-10T15:36:32,300 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/399128fef0c449a9b56c6dd152f93218, entries=350, sequenceid=92, filesize=63.8 K 2024-12-10T15:36:32,309 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/8823edb95c054d91acd31a1932d67748 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/8823edb95c054d91acd31a1932d67748 2024-12-10T15:36:32,322 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/8823edb95c054d91acd31a1932d67748, entries=150, sequenceid=92, filesize=11.7 K 2024-12-10T15:36:32,327 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/1e003d2695944244b42eba4f66c8ba44 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/1e003d2695944244b42eba4f66c8ba44 2024-12-10T15:36:32,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-10T15:36:32,334 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/1e003d2695944244b42eba4f66c8ba44, entries=150, sequenceid=92, filesize=11.7 K 2024-12-10T15:36:32,335 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 94b551ecd4747174537fcd83980a419f in 361ms, sequenceid=92, compaction requested=true 2024-12-10T15:36:32,335 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:32,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 94b551ecd4747174537fcd83980a419f:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:36:32,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:32,335 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 
16 blocking 2024-12-10T15:36:32,335 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:32,336 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:32,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 94b551ecd4747174537fcd83980a419f:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:36:32,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:32,337 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 94b551ecd4747174537fcd83980a419f/B is initiating minor compaction (all files) 2024-12-10T15:36:32,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 94b551ecd4747174537fcd83980a419f:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:36:32,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:32,337 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 94b551ecd4747174537fcd83980a419f/B in TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:32,337 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/a3fc0d5a01af4deba6249a2256e68116, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/63c5de94a15941f984ed3c63979e5367, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/8823edb95c054d91acd31a1932d67748] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp, totalSize=35.3 K 2024-12-10T15:36:32,337 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 127336 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:32,337 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 94b551ecd4747174537fcd83980a419f/A is initiating minor compaction (all files) 2024-12-10T15:36:32,337 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 94b551ecd4747174537fcd83980a419f/A in TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
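The flush that just finished (~53.67 KB across the A/B/C families) immediately makes three store files per store eligible for minor compaction, and the "Over memstore limit=512.0 K" warnings earlier in this run come from the region's blocking memstore ceiling. A hedged sketch of the settings behind those numbers follows; the concrete values are assumptions chosen to reproduce the figures seen here (512 K read as flush size times block multiplier), not values taken from the test's actual site configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class FlushAndCompactionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Writes block with RegionTooBusyException once a region's memstore reaches
        // flush.size * block.multiplier; 128 KB * 4 = 512 KB matches the
        // "Over memstore limit=512.0 K" warnings (the split between the two factors
        // is an assumption).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        // Minor compaction becomes eligible at 3 store files and flushes are blocked
        // at 16, matching "3 eligible, 16 blocking" in the selection entries above.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
      }
    }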
2024-12-10T15:36:32,337 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/85929a8524264170982a1d714762312d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/df5f54dbc99045f38221f6faf544ffcf, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/399128fef0c449a9b56c6dd152f93218] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp, totalSize=124.4 K 2024-12-10T15:36:32,337 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:32,337 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. files: [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/85929a8524264170982a1d714762312d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/df5f54dbc99045f38221f6faf544ffcf, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/399128fef0c449a9b56c6dd152f93218] 2024-12-10T15:36:32,338 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting a3fc0d5a01af4deba6249a2256e68116, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733844988506 2024-12-10T15:36:32,338 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 85929a8524264170982a1d714762312d, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733844988506 2024-12-10T15:36:32,338 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 63c5de94a15941f984ed3c63979e5367, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733844989707 2024-12-10T15:36:32,338 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting df5f54dbc99045f38221f6faf544ffcf, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733844989707 2024-12-10T15:36:32,339 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 8823edb95c054d91acd31a1932d67748, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733844991955 2024-12-10T15:36:32,339 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 399128fef0c449a9b56c6dd152f93218, keycount=350, bloomtype=ROW, size=63.8 K, 
encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733844991934 2024-12-10T15:36:32,339 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:32,340 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-10T15:36:32,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:32,340 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2837): Flushing 94b551ecd4747174537fcd83980a419f 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-10T15:36:32,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=A 2024-12-10T15:36:32,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:32,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=B 2024-12-10T15:36:32,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:32,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=C 2024-12-10T15:36:32,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:32,364 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:32,381 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 94b551ecd4747174537fcd83980a419f#B#compaction#115 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:32,382 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/988138fbae0a4f9ba1e9ac9fe4ff49be is 50, key is test_row_0/B:col10/1733844991970/Put/seqid=0 2024-12-10T15:36:32,388 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
as already flushing 2024-12-10T15:36:32,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:32,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210fec977c77189423ea7fe0bbdaeac728b_94b551ecd4747174537fcd83980a419f is 50, key is test_row_0/A:col10/1733844992059/Put/seqid=0 2024-12-10T15:36:32,408 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210af0f286e4f644e48a659162ed3cb1d10_94b551ecd4747174537fcd83980a419f store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:32,411 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210af0f286e4f644e48a659162ed3cb1d10_94b551ecd4747174537fcd83980a419f, store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:32,411 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210af0f286e4f644e48a659162ed3cb1d10_94b551ecd4747174537fcd83980a419f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:32,421 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:32,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845052406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:32,422 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:32,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845052411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:32,422 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:32,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845052416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:32,429 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:32,429 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:32,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845052421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:32,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845052422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:32,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741964_1140 (size=4469) 2024-12-10T15:36:32,463 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 94b551ecd4747174537fcd83980a419f#A#compaction#114 average throughput is 0.25 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:32,464 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/d3f1bf63c20047c9a5192463cd610254 is 175, key is test_row_0/A:col10/1733844991970/Put/seqid=0 2024-12-10T15:36:32,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741962_1138 (size=12207) 2024-12-10T15:36:32,485 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/988138fbae0a4f9ba1e9ac9fe4ff49be as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/988138fbae0a4f9ba1e9ac9fe4ff49be 2024-12-10T15:36:32,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741963_1139 (size=12154) 2024-12-10T15:36:32,508 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 94b551ecd4747174537fcd83980a419f/B of 94b551ecd4747174537fcd83980a419f into 988138fbae0a4f9ba1e9ac9fe4ff49be(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:36:32,508 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:32,508 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f., storeName=94b551ecd4747174537fcd83980a419f/B, priority=13, startTime=1733844992335; duration=0sec 2024-12-10T15:36:32,508 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:32,508 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 94b551ecd4747174537fcd83980a419f:B 2024-12-10T15:36:32,508 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:32,509 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:32,509 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 94b551ecd4747174537fcd83980a419f/C is initiating minor compaction (all files) 2024-12-10T15:36:32,509 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 94b551ecd4747174537fcd83980a419f/C in TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
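[Editor's note] The repeated RegionTooBusyException stack traces in this part of the log are the server rejecting client mutations while the region's memstore is above its blocking size (reported as "Over memstore limit=512.0 K", i.e. the per-region flush size times the block multiplier configured for this test cluster; the exact values are not shown here). The following is a minimal, hypothetical client-side sketch of the kind of write those traces are rejecting, with a simple application-level retry; the row/column values are assumptions modeled on the keys visible in the log.

```java
// Hypothetical client sketch, not taken from the test. It mirrors the writes
// that the RegionTooBusyException traces above are rejecting.
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriteSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        table.put(put);
      } catch (IOException e) {
        // The stock client retries internally (hbase.client.retries.number);
        // if the region stays over its memstore blocking limit long enough,
        // the server-side RegionTooBusyException eventually surfaces here,
        // often wrapped. A short backoff and one more attempt is usually
        // enough once the in-flight flush completes.
        Thread.sleep(200L);
        table.put(put);
      }
    }
  }
}
```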
2024-12-10T15:36:32,510 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/90ffc0c60e29491ca94194b50faa8bd5, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/9bbbe77e3f184447b129c86b5067f41b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/1e003d2695944244b42eba4f66c8ba44] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp, totalSize=35.3 K 2024-12-10T15:36:32,510 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 90ffc0c60e29491ca94194b50faa8bd5, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733844988506 2024-12-10T15:36:32,510 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 9bbbe77e3f184447b129c86b5067f41b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733844989707 2024-12-10T15:36:32,511 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e003d2695944244b42eba4f66c8ba44, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733844991955 2024-12-10T15:36:32,527 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:32,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845052523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:32,528 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:32,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845052523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:32,529 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:32,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845052523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:32,533 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:32,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845052531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:32,533 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:32,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845052531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:32,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741965_1141 (size=31161) 2024-12-10T15:36:32,550 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 94b551ecd4747174537fcd83980a419f#C#compaction#117 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:32,550 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/d3f1bf63c20047c9a5192463cd610254 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/d3f1bf63c20047c9a5192463cd610254 2024-12-10T15:36:32,550 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/fc0b6d60fcdd453f8ac983889809ff28 is 50, key is test_row_0/C:col10/1733844991970/Put/seqid=0 2024-12-10T15:36:32,558 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 94b551ecd4747174537fcd83980a419f/A of 94b551ecd4747174537fcd83980a419f into d3f1bf63c20047c9a5192463cd610254(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
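[Editor's note] The DefaultMobStoreCompactor and HMobStore "FLUSH Renaming flushed file" entries in this run indicate that family A of the test table is MOB-enabled, while B and C are plain column families. The test's own table setup is not shown in this log; the sketch below is only an assumed illustration of what such a definition could look like with the HBase 2.x Admin API, and the MOB threshold value is invented for the example.

```java
// Hypothetical setup sketch (assumed, not taken from the test): a table shaped
// like the one in this log, with MOB storage enabled on family A so that its
// flushes and compactions go through the MOB store paths seen above.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      // Family A: values larger than the threshold are stored as MOB cells.
      // The 100-byte threshold below is an assumption for illustration only.
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
          .setMobEnabled(true)
          .setMobThreshold(100L)
          .build());
      // Families B and C appear as ordinary (non-MOB) stores in the log.
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"));
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
      admin.createTable(table.build());
    }
  }
}
```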
2024-12-10T15:36:32,558 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:32,558 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f., storeName=94b551ecd4747174537fcd83980a419f/A, priority=13, startTime=1733844992335; duration=0sec 2024-12-10T15:36:32,558 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:32,558 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 94b551ecd4747174537fcd83980a419f:A 2024-12-10T15:36:32,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741966_1142 (size=12207) 2024-12-10T15:36:32,590 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/fc0b6d60fcdd453f8ac983889809ff28 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/fc0b6d60fcdd453f8ac983889809ff28 2024-12-10T15:36:32,603 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 94b551ecd4747174537fcd83980a419f/C of 94b551ecd4747174537fcd83980a419f into fc0b6d60fcdd453f8ac983889809ff28(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:36:32,603 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:32,603 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f., storeName=94b551ecd4747174537fcd83980a419f/C, priority=13, startTime=1733844992337; duration=0sec 2024-12-10T15:36:32,603 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:32,603 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 94b551ecd4747174537fcd83980a419f:C 2024-12-10T15:36:32,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-10T15:36:32,733 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:32,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845052731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:32,734 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:32,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845052731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:32,735 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:32,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845052732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:32,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:32,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845052738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:32,744 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:32,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845052740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:32,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:32,909 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210fec977c77189423ea7fe0bbdaeac728b_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210fec977c77189423ea7fe0bbdaeac728b_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:32,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/7c703f442e60411ca597f489b21b6ad6, store: [table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:32,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/7c703f442e60411ca597f489b21b6ad6 is 175, key is test_row_0/A:col10/1733844992059/Put/seqid=0 2024-12-10T15:36:32,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741967_1143 (size=30955) 2024-12-10T15:36:32,966 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=118, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/7c703f442e60411ca597f489b21b6ad6 2024-12-10T15:36:32,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/0123da49372142c1ab83b33fd53994c6 is 50, key is test_row_0/B:col10/1733844992059/Put/seqid=0 2024-12-10T15:36:33,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741968_1144 (size=12001) 2024-12-10T15:36:33,032 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/0123da49372142c1ab83b33fd53994c6 2024-12-10T15:36:33,040 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:33,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845053036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:33,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:33,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845053044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:33,050 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:33,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845053048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:33,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:33,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845053047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:33,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/2e1d1765f0ed41569a1d26d2afff4d5d is 50, key is test_row_0/C:col10/1733844992059/Put/seqid=0 2024-12-10T15:36:33,057 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:33,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845053051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:33,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741969_1145 (size=12001) 2024-12-10T15:36:33,098 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/2e1d1765f0ed41569a1d26d2afff4d5d 2024-12-10T15:36:33,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/7c703f442e60411ca597f489b21b6ad6 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/7c703f442e60411ca597f489b21b6ad6 2024-12-10T15:36:33,111 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/7c703f442e60411ca597f489b21b6ad6, entries=150, sequenceid=118, filesize=30.2 K 2024-12-10T15:36:33,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/0123da49372142c1ab83b33fd53994c6 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/0123da49372142c1ab83b33fd53994c6 2024-12-10T15:36:33,121 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/0123da49372142c1ab83b33fd53994c6, entries=150, sequenceid=118, filesize=11.7 K 2024-12-10T15:36:33,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=42}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/2e1d1765f0ed41569a1d26d2afff4d5d as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/2e1d1765f0ed41569a1d26d2afff4d5d 2024-12-10T15:36:33,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-10T15:36:33,139 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/2e1d1765f0ed41569a1d26d2afff4d5d, entries=150, sequenceid=118, filesize=11.7 K 2024-12-10T15:36:33,151 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 94b551ecd4747174537fcd83980a419f in 811ms, sequenceid=118, compaction requested=false 2024-12-10T15:36:33,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2538): Flush status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:33,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
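
The RegionTooBusyException warnings above are the region server's back-pressure path: once a region's memstore exceeds hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier (reported here as a 512.0 K limit), HRegion.checkResources rejects further mutations until the in-flight flush drains the memstore. A minimal client-side sketch of how a writer might absorb that rejection follows; it assumes the stock HBase 2.x client API, the built-in retry layer may surface the exception wrapped rather than directly, and the row, value and backoff numbers are illustrative only, not taken from the test harness.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetryExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Same shape of write as the test: family A, qualifier col10, small value.
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;                       // assumed starting backoff, not from the test
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);                         // the client also retries internally
              break;
            } catch (RegionTooBusyException busy) {   // region is over its memstore limit
              if (attempt == 5) {
                throw busy;                           // give up after a few attempts
              }
              Thread.sleep(backoffMs);                // wait for the flush to drain the memstore
              backoffMs *= 2;
            }
          }
        }
      }
    }
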
2024-12-10T15:36:33,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=42 2024-12-10T15:36:33,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=42 2024-12-10T15:36:33,161 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=41 2024-12-10T15:36:33,161 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1290 sec 2024-12-10T15:36:33,168 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees in 1.1340 sec 2024-12-10T15:36:33,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:33,545 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 94b551ecd4747174537fcd83980a419f 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-10T15:36:33,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=A 2024-12-10T15:36:33,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:33,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=B 2024-12-10T15:36:33,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:33,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=C 2024-12-10T15:36:33,546 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:33,565 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210717b451fe68b447abdf82b826f33ceb0_94b551ecd4747174537fcd83980a419f is 50, key is test_row_0/A:col10/1733844992420/Put/seqid=0 2024-12-10T15:36:33,602 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:33,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845053598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:33,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:33,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845053599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:33,605 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:33,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845053601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:33,605 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:33,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845053601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:33,606 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:33,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845053603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:33,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741970_1146 (size=12204) 2024-12-10T15:36:33,608 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:33,613 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210717b451fe68b447abdf82b826f33ceb0_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210717b451fe68b447abdf82b826f33ceb0_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:33,614 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/84c26c7ddea84bce9b0168618d852ad6, store: [table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:33,615 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/84c26c7ddea84bce9b0168618d852ad6 is 175, key is test_row_0/A:col10/1733844992420/Put/seqid=0 2024-12-10T15:36:33,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is 
added to blk_1073741971_1147 (size=31005) 2024-12-10T15:36:33,654 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=133, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/84c26c7ddea84bce9b0168618d852ad6 2024-12-10T15:36:33,672 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/eec13a2bfb7e47edb007ad53b97e7795 is 50, key is test_row_0/B:col10/1733844992420/Put/seqid=0 2024-12-10T15:36:33,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:33,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845053706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:33,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:33,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845053705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:33,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:33,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845053706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:33,708 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:33,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845053707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:33,708 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:33,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845053708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:33,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741972_1148 (size=12051) 2024-12-10T15:36:33,717 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/eec13a2bfb7e47edb007ad53b97e7795 2024-12-10T15:36:33,728 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/5aec35dba6544850b171f486ab876a4a is 50, key is test_row_0/C:col10/1733844992420/Put/seqid=0 2024-12-10T15:36:33,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741973_1149 (size=12051) 2024-12-10T15:36:33,773 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/5aec35dba6544850b171f486ab876a4a 2024-12-10T15:36:33,779 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/84c26c7ddea84bce9b0168618d852ad6 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/84c26c7ddea84bce9b0168618d852ad6 2024-12-10T15:36:33,789 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/84c26c7ddea84bce9b0168618d852ad6, entries=150, sequenceid=133, filesize=30.3 K 2024-12-10T15:36:33,790 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/eec13a2bfb7e47edb007ad53b97e7795 as 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/eec13a2bfb7e47edb007ad53b97e7795 2024-12-10T15:36:33,797 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/eec13a2bfb7e47edb007ad53b97e7795, entries=150, sequenceid=133, filesize=11.8 K 2024-12-10T15:36:33,799 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/5aec35dba6544850b171f486ab876a4a as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/5aec35dba6544850b171f486ab876a4a 2024-12-10T15:36:33,803 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/5aec35dba6544850b171f486ab876a4a, entries=150, sequenceid=133, filesize=11.8 K 2024-12-10T15:36:33,804 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 94b551ecd4747174537fcd83980a419f in 259ms, sequenceid=133, compaction requested=true 2024-12-10T15:36:33,805 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:33,806 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:33,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 94b551ecd4747174537fcd83980a419f:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:36:33,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:33,806 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:33,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 94b551ecd4747174537fcd83980a419f:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:36:33,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:33,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 94b551ecd4747174537fcd83980a419f:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:36:33,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:33,807 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 3 files of size 93121 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:33,807 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:33,807 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 94b551ecd4747174537fcd83980a419f/B is initiating minor compaction (all files) 2024-12-10T15:36:33,807 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 94b551ecd4747174537fcd83980a419f/A is initiating minor compaction (all files) 2024-12-10T15:36:33,807 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 94b551ecd4747174537fcd83980a419f/B in TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:33,807 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 94b551ecd4747174537fcd83980a419f/A in TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:33,808 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/988138fbae0a4f9ba1e9ac9fe4ff49be, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/0123da49372142c1ab83b33fd53994c6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/eec13a2bfb7e47edb007ad53b97e7795] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp, totalSize=35.4 K 2024-12-10T15:36:33,808 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/d3f1bf63c20047c9a5192463cd610254, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/7c703f442e60411ca597f489b21b6ad6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/84c26c7ddea84bce9b0168618d852ad6] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp, totalSize=90.9 K 2024-12-10T15:36:33,808 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:33,808 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
files: [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/d3f1bf63c20047c9a5192463cd610254, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/7c703f442e60411ca597f489b21b6ad6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/84c26c7ddea84bce9b0168618d852ad6] 2024-12-10T15:36:33,808 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting d3f1bf63c20047c9a5192463cd610254, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733844991955 2024-12-10T15:36:33,808 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 988138fbae0a4f9ba1e9ac9fe4ff49be, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733844991955 2024-12-10T15:36:33,808 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 0123da49372142c1ab83b33fd53994c6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733844992042 2024-12-10T15:36:33,808 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c703f442e60411ca597f489b21b6ad6, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733844992042 2024-12-10T15:36:33,809 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 84c26c7ddea84bce9b0168618d852ad6, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733844992408 2024-12-10T15:36:33,809 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting eec13a2bfb7e47edb007ad53b97e7795, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733844992408 2024-12-10T15:36:33,837 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 94b551ecd4747174537fcd83980a419f#B#compaction#123 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:33,838 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/88667b6cb33540d7b976f21e690d22f7 is 50, key is test_row_0/B:col10/1733844992420/Put/seqid=0 2024-12-10T15:36:33,848 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:33,856 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121011d6f47f02e84536851045482347a789_94b551ecd4747174537fcd83980a419f store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:33,858 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121011d6f47f02e84536851045482347a789_94b551ecd4747174537fcd83980a419f, store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:33,858 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121011d6f47f02e84536851045482347a789_94b551ecd4747174537fcd83980a419f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:33,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741974_1150 (size=12359) 2024-12-10T15:36:33,893 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/88667b6cb33540d7b976f21e690d22f7 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/88667b6cb33540d7b976f21e690d22f7 2024-12-10T15:36:33,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741975_1151 (size=4469) 2024-12-10T15:36:33,916 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 94b551ecd4747174537fcd83980a419f/B of 94b551ecd4747174537fcd83980a419f into 88667b6cb33540d7b976f21e690d22f7(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
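
The "Exploring compaction algorithm has selected 3 files of size 93121 ... with 1 in ratio" lines above report the exploring compaction policy's size test: a candidate set is "in ratio" when no file is larger than hbase.hstore.compaction.ratio (1.2 by default) times the combined size of the other files in the set. The sketch below restates that check in isolation; it is a stand-alone rewrite rather than the actual ExploringCompactionPolicy code, and the three sizes are illustrative values chosen to sum to the 93,121 bytes reported for store A.

    import java.util.List;

    public class RatioCheckSketch {
      // A file stays in the candidate set if it is no bigger than ratio * (sum of the others).
      static boolean filesInRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
          if (size > (total - size) * ratio) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Roughly equal ~30 KB HFiles, totalling the 93,121 bytes logged for store A.
        List<Long> storeA = List.of(31_117L, 30_999L, 31_005L);
        // hbase.hstore.compaction.ratio defaults to 1.2 (assumed unchanged in this run).
        System.out.println(filesInRatio(storeA, 1.2));   // true -> all three files are selected
      }
    }

Because the three flush files are close in size, every file passes the ratio test and the whole set is compacted, which is exactly the "3 files ... 1 in ratio" outcome logged for both the A and B stores.
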
2024-12-10T15:36:33,916 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:33,916 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f., storeName=94b551ecd4747174537fcd83980a419f/B, priority=13, startTime=1733844993806; duration=0sec 2024-12-10T15:36:33,916 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:33,916 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 94b551ecd4747174537fcd83980a419f:B 2024-12-10T15:36:33,916 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:33,918 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:33,918 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 94b551ecd4747174537fcd83980a419f/C is initiating minor compaction (all files) 2024-12-10T15:36:33,918 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 94b551ecd4747174537fcd83980a419f/C in TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:33,918 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/fc0b6d60fcdd453f8ac983889809ff28, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/2e1d1765f0ed41569a1d26d2afff4d5d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/5aec35dba6544850b171f486ab876a4a] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp, totalSize=35.4 K 2024-12-10T15:36:33,919 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting fc0b6d60fcdd453f8ac983889809ff28, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733844991955 2024-12-10T15:36:33,919 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e1d1765f0ed41569a1d26d2afff4d5d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733844992042 2024-12-10T15:36:33,920 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 5aec35dba6544850b171f486ab876a4a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733844992408 2024-12-10T15:36:33,928 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 94b551ecd4747174537fcd83980a419f 3/3 column 
families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-10T15:36:33,928 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=A 2024-12-10T15:36:33,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:33,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=B 2024-12-10T15:36:33,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:33,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=C 2024-12-10T15:36:33,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:33,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:33,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:33,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845053934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:33,941 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:33,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845053936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:33,942 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:33,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845053937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:33,942 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:33,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845053937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:33,942 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:33,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845053938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:33,944 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 94b551ecd4747174537fcd83980a419f#C#compaction#125 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:33,945 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/6e1fcd987c3b46d39da9fb3e93d69b0f is 50, key is test_row_0/C:col10/1733844992420/Put/seqid=0 2024-12-10T15:36:33,976 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412102765691ee3c84fcc859b57715c08cb31_94b551ecd4747174537fcd83980a419f is 50, key is test_row_0/A:col10/1733844993920/Put/seqid=0 2024-12-10T15:36:33,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741976_1152 (size=12359) 2024-12-10T15:36:34,005 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/6e1fcd987c3b46d39da9fb3e93d69b0f as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/6e1fcd987c3b46d39da9fb3e93d69b0f 2024-12-10T15:36:34,011 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 94b551ecd4747174537fcd83980a419f/C of 94b551ecd4747174537fcd83980a419f into 6e1fcd987c3b46d39da9fb3e93d69b0f(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:36:34,011 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:34,011 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f., storeName=94b551ecd4747174537fcd83980a419f/C, priority=13, startTime=1733844993807; duration=0sec 2024-12-10T15:36:34,011 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:34,011 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 94b551ecd4747174537fcd83980a419f:C 2024-12-10T15:36:34,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741977_1153 (size=12304) 2024-12-10T15:36:34,017 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:34,022 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412102765691ee3c84fcc859b57715c08cb31_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412102765691ee3c84fcc859b57715c08cb31_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:34,024 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/fc9dd20615df43b49e08a03bf06c2c78, store: [table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:34,024 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/fc9dd20615df43b49e08a03bf06c2c78 is 175, key is test_row_0/A:col10/1733844993920/Put/seqid=0 2024-12-10T15:36:34,045 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:34,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845054044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:34,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:34,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845054044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:34,046 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:34,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845054044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:34,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:34,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845054048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:34,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:34,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845054049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:34,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741978_1154 (size=31105) 2024-12-10T15:36:34,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-10T15:36:34,134 INFO [Thread-585 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 41 completed 2024-12-10T15:36:34,137 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:36:34,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees 2024-12-10T15:36:34,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-10T15:36:34,140 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:36:34,141 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:36:34,141 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:36:34,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-10T15:36:34,248 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:34,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845054247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:34,248 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:34,254 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:34,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845054249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:34,258 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:34,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845054256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:34,268 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:34,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845054263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:34,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845054247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:34,293 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:34,295 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-10T15:36:34,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:34,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:34,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:34,295 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:34,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:34,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:34,309 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 94b551ecd4747174537fcd83980a419f#A#compaction#124 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:34,310 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/83ea3f03ed964ac6b1ee5d063ba4f4ac is 175, key is test_row_0/A:col10/1733844992420/Put/seqid=0 2024-12-10T15:36:34,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741979_1155 (size=31313) 2024-12-10T15:36:34,342 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/83ea3f03ed964ac6b1ee5d063ba4f4ac as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/83ea3f03ed964ac6b1ee5d063ba4f4ac 2024-12-10T15:36:34,352 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 94b551ecd4747174537fcd83980a419f/A of 94b551ecd4747174537fcd83980a419f into 83ea3f03ed964ac6b1ee5d063ba4f4ac(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:36:34,352 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:34,352 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f., storeName=94b551ecd4747174537fcd83980a419f/A, priority=13, startTime=1733844993806; duration=0sec 2024-12-10T15:36:34,352 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:34,352 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 94b551ecd4747174537fcd83980a419f:A 2024-12-10T15:36:34,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-10T15:36:34,450 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:34,452 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-10T15:36:34,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:34,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:34,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:34,452 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:34,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:34,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:34,478 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=161, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/fc9dd20615df43b49e08a03bf06c2c78 2024-12-10T15:36:34,498 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/567e877889994ac48d0d985f043cecf9 is 50, key is test_row_0/B:col10/1733844993920/Put/seqid=0 2024-12-10T15:36:34,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741980_1156 (size=12151) 2024-12-10T15:36:34,550 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/567e877889994ac48d0d985f043cecf9 2024-12-10T15:36:34,559 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:34,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845054555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:34,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:34,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845054558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:34,563 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:34,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845054560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:34,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:34,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845054570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:34,573 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/243dee43af58445790bb09707ce7e36d is 50, key is test_row_0/C:col10/1733844993920/Put/seqid=0 2024-12-10T15:36:34,573 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T15:36:34,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845054573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049
2024-12-10T15:36:34,608 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049
2024-12-10T15:36:34,608 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44
2024-12-10T15:36:34,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.
2024-12-10T15:36:34,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing
2024-12-10T15:36:34,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.
2024-12-10T15:36:34,609 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44
java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:36:34,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44
java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:36:34,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=44
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:36:34,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741981_1157 (size=12151)
2024-12-10T15:36:34,627 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/243dee43af58445790bb09707ce7e36d
2024-12-10T15:36:34,637 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/fc9dd20615df43b49e08a03bf06c2c78 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/fc9dd20615df43b49e08a03bf06c2c78
2024-12-10T15:36:34,647 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/fc9dd20615df43b49e08a03bf06c2c78, entries=150, sequenceid=161, filesize=30.4 K
2024-12-10T15:36:34,649 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/567e877889994ac48d0d985f043cecf9 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/567e877889994ac48d0d985f043cecf9
2024-12-10T15:36:34,658 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/567e877889994ac48d0d985f043cecf9, entries=150, sequenceid=161, filesize=11.9 K
2024-12-10T15:36:34,669 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/243dee43af58445790bb09707ce7e36d as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/243dee43af58445790bb09707ce7e36d
2024-12-10T15:36:34,683 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/243dee43af58445790bb09707ce7e36d, entries=150, sequenceid=161, filesize=11.9 K
2024-12-10T15:36:34,684 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 94b551ecd4747174537fcd83980a419f in 756ms, sequenceid=161, compaction requested=false
2024-12-10T15:36:34,685 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 94b551ecd4747174537fcd83980a419f:
2024-12-10T15:36:34,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43
2024-12-10T15:36:34,763 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049
2024-12-10T15:36:34,764 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44
2024-12-10T15:36:34,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.
2024-12-10T15:36:34,764 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2837): Flushing 94b551ecd4747174537fcd83980a419f 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB
2024-12-10T15:36:34,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=A
2024-12-10T15:36:34,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T15:36:34,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=B
2024-12-10T15:36:34,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T15:36:34,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=C
2024-12-10T15:36:34,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T15:36:34,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210305aee5fa6c54160b51824c8c9b677a4_94b551ecd4747174537fcd83980a419f is 50, key is test_row_0/A:col10/1733844993936/Put/seqid=0
2024-12-10T15:36:34,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741982_1158 (size=12304)
2024-12-10T15:36:34,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:36:34,812 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210305aee5fa6c54160b51824c8c9b677a4_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210305aee5fa6c54160b51824c8c9b677a4_94b551ecd4747174537fcd83980a419f
2024-12-10T15:36:34,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/5e93c3ce3f8c451dbd9a452149e6b693, store: [table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f]
2024-12-10T15:36:34,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/5e93c3ce3f8c451dbd9a452149e6b693 is 175, key is test_row_0/A:col10/1733844993936/Put/seqid=0
2024-12-10T15:36:34,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741983_1159 (size=31105)
2024-12-10T15:36:34,856 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=172, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/5e93c3ce3f8c451dbd9a452149e6b693
2024-12-10T15:36:34,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/296398fbc52a4f588a649430a6d151fe is 50, key is test_row_0/B:col10/1733844993936/Put/seqid=0
2024-12-10T15:36:34,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741984_1160 (size=12151)
2024-12-10T15:36:34,938 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/296398fbc52a4f588a649430a6d151fe
2024-12-10T15:36:34,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/caeb8d9a5fda4c11b769ccd554a487d2 is 50, key is test_row_0/C:col10/1733844993936/Put/seqid=0
2024-12-10T15:36:34,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741985_1161 (size=12151)
2024-12-10T15:36:34,994 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/caeb8d9a5fda4c11b769ccd554a487d2
2024-12-10T15:36:34,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/5e93c3ce3f8c451dbd9a452149e6b693 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/5e93c3ce3f8c451dbd9a452149e6b693
2024-12-10T15:36:35,005 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/5e93c3ce3f8c451dbd9a452149e6b693, entries=150, sequenceid=172, filesize=30.4 K
2024-12-10T15:36:35,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/296398fbc52a4f588a649430a6d151fe as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/296398fbc52a4f588a649430a6d151fe
2024-12-10T15:36:35,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... identical "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" DEBUG records from RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 on port 46239 repeat continuously from 2024-12-10T15:36:35,008 through 2024-12-10T15:36:35,084; the repeats are omitted here, the remaining records are kept ...]
2024-12-10T15:36:35,013 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/296398fbc52a4f588a649430a6d151fe, entries=150, sequenceid=172, filesize=11.9 K
2024-12-10T15:36:35,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/caeb8d9a5fda4c11b769ccd554a487d2 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/caeb8d9a5fda4c11b769ccd554a487d2
2024-12-10T15:36:35,021 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/caeb8d9a5fda4c11b769ccd554a487d2, entries=150, sequenceid=172, filesize=11.9 K
2024-12-10T15:36:35,025 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=0 B/0 for 94b551ecd4747174537fcd83980a419f in 261ms, sequenceid=172, compaction requested=true
2024-12-10T15:36:35,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2538): Flush status journal for 94b551ecd4747174537fcd83980a419f:
2024-12-10T15:36:35,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.
2024-12-10T15:36:35,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=44
2024-12-10T15:36:35,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=44
2024-12-10T15:36:35,028 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43
2024-12-10T15:36:35,028 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 886 msec
2024-12-10T15:36:35,032 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees in 893 msec
2024-12-10T15:36:35,085 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,091 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,096 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,101 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,106 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,114 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,119 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,129 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,135 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,144 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,151 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,155 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,160 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,172 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,179 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,184 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,218 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,227 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,244 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:36:35,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:36:35,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:36:35,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:36:35,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:36:35,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:36:35,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:36:35,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:36:35,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43
2024-12-10T15:36:35,252 INFO [Thread-585 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 43 completed
2024-12-10T15:36:35,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:36:35,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:36:35,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:36:35,256 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-10T15:36:35,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:36:35,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees
2024-12-10T15:36:35,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:36:35,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:36:35,258 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-10T15:36:35,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:36:35,260 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-10T15:36:35,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 94b551ecd4747174537fcd83980a419f
2024-12-10T15:36:35,260 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-10T15:36:35,260 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 94b551ecd4747174537fcd83980a419f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-12-10T15:36:35,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=A
2024-12-10T15:36:35,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T15:36:35,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=B
2024-12-10T15:36:35,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T15:36:35,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=C
2024-12-10T15:36:35,261 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T15:36:35,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45
2024-12-10T15:36:35,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:36:35,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122):
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,277 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412101fa9eb6c0d3f42259140e8ffff4e2dab_94b551ecd4747174537fcd83980a419f is 50, key is test_row_0/A:col10/1733844995176/Put/seqid=0 2024-12-10T15:36:35,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:35,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845055346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:35,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:35,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845055347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:35,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-10T15:36:35,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741986_1162 (size=17284) 2024-12-10T15:36:35,378 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:35,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845055360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:35,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:35,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845055367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:35,381 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:35,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845055379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:35,412 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:35,413 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-10T15:36:35,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:35,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:35,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:35,413 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:35,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:35,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:35,466 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:35,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845055461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:35,469 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:35,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845055461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:35,490 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:35,490 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:35,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845055482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:35,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845055480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:35,491 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:35,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845055484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:35,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-10T15:36:35,566 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:35,566 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-10T15:36:35,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:35,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:35,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:35,566 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
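The entries above trace one step of the run: a client flush of TestAcidGuarantees completes (procId 43), a new flush is stored as FlushTableProcedure pid=45 with a FlushRegionProcedure subprocedure pid=46, the MemStoreFlusher begins flushing region 94b551ecd4747174537fcd83980a419f, and concurrent writers are rejected with RegionTooBusyException once the region exceeds its 512.0 K blocking memstore limit. What follows is only a minimal sketch of the client-side calls that would produce such a sequence; it is not the TestAcidGuarantees test code, and the class name, retry loop, sleep intervals, and cell value are assumptions. Admin.flush(TableName) and Table.put(Put) are standard HBase client calls; depending on client retry settings, the busy-region error may surface directly as RegionTooBusyException or wrapped in a retries-exhausted exception.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushAndWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("TestAcidGuarantees"); // table name taken from the log above

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin();
         Table table = conn.getTable(tn)) {

      // Asking the master to flush the table is what shows up in the log as
      // HMaster "flush TestAcidGuarantees", followed by a FlushTableProcedure (pid=45)
      // and a per-region FlushRegionProcedure (pid=46).
      admin.flush(tn);

      // Writers racing with the flush can be rejected while the region is over its
      // blocking memstore limit ("Over memstore limit=512.0 K" in the log);
      // this sketch simply backs off and retries a few times.
      Put put = new Put(Bytes.toBytes("test_row_0"));           // row key seen in the log
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), // family/qualifier seen in the log
          Bytes.toBytes("value"));                              // placeholder value (assumption)
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break;                                                // write accepted
        } catch (RegionTooBusyException busy) {
          Thread.sleep(100L * (attempt + 1));                   // simple linear backoff
        }
      }
    }
  }
}
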
2024-12-10T15:36:35,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:35,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:35,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:35,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845055669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:35,680 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:35,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845055670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:35,692 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:35,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845055692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:35,694 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:35,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845055692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:35,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:35,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845055699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:35,730 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:35,731 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-10T15:36:35,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:35,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:35,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:35,731 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:35,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:35,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:35,757 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:35,788 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412101fa9eb6c0d3f42259140e8ffff4e2dab_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412101fa9eb6c0d3f42259140e8ffff4e2dab_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:35,789 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/f38e9961fd5e4bfba0529c43d9c06572, store: [table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:35,790 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/f38e9961fd5e4bfba0529c43d9c06572 is 175, key is test_row_0/A:col10/1733844995176/Put/seqid=0 2024-12-10T15:36:35,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741987_1163 (size=48389) 2024-12-10T15:36:35,828 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=183, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/f38e9961fd5e4bfba0529c43d9c06572 2024-12-10T15:36:35,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-10T15:36:35,865 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/0e9f7c6d0f86476098f0b07884a17f88 is 50, key is test_row_0/B:col10/1733844995176/Put/seqid=0 2024-12-10T15:36:35,885 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:35,885 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-10T15:36:35,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
2024-12-10T15:36:35,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:35,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:35,887 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:35,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:35,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:35,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741988_1164 (size=12151) 2024-12-10T15:36:35,982 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:35,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845055981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:35,986 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:35,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845055984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:36,002 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:36,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845055996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:36,002 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:36,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845055997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:36,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:36,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845056009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:36,041 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:36,041 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-10T15:36:36,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:36,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:36,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:36,042 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:36,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:36,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:36,194 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:36,194 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-10T15:36:36,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:36,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:36,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:36,196 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:36,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:36,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:36,308 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=183 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/0e9f7c6d0f86476098f0b07884a17f88 2024-12-10T15:36:36,348 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:36,348 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-10T15:36:36,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:36,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:36,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
2024-12-10T15:36:36,351 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:36,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:36,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:36,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-10T15:36:36,373 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/94eb0e5bfb1447aa8151e199c996081d is 50, key is test_row_0/C:col10/1733844995176/Put/seqid=0 2024-12-10T15:36:36,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741989_1165 (size=12151) 2024-12-10T15:36:36,428 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=183 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/94eb0e5bfb1447aa8151e199c996081d 2024-12-10T15:36:36,467 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/f38e9961fd5e4bfba0529c43d9c06572 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/f38e9961fd5e4bfba0529c43d9c06572 2024-12-10T15:36:36,486 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:36,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845056485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:36,500 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:36,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845056495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:36,505 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:36,506 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-10T15:36:36,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:36,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:36,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:36,506 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:36,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:36,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:36,509 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/f38e9961fd5e4bfba0529c43d9c06572, entries=250, sequenceid=183, filesize=47.3 K 2024-12-10T15:36:36,513 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:36,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845056506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:36,513 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:36,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845056509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:36,519 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/0e9f7c6d0f86476098f0b07884a17f88 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/0e9f7c6d0f86476098f0b07884a17f88 2024-12-10T15:36:36,520 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:36,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845056514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:36,538 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/0e9f7c6d0f86476098f0b07884a17f88, entries=150, sequenceid=183, filesize=11.9 K 2024-12-10T15:36:36,543 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/94eb0e5bfb1447aa8151e199c996081d as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/94eb0e5bfb1447aa8151e199c996081d 2024-12-10T15:36:36,566 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/94eb0e5bfb1447aa8151e199c996081d, entries=150, sequenceid=183, filesize=11.9 K 2024-12-10T15:36:36,579 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 94b551ecd4747174537fcd83980a419f in 1318ms, sequenceid=183, compaction requested=true 2024-12-10T15:36:36,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:36,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 94b551ecd4747174537fcd83980a419f:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:36:36,579 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:36:36,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:36,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 94b551ecd4747174537fcd83980a419f:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:36:36,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), 
splitQueue=0 2024-12-10T15:36:36,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 94b551ecd4747174537fcd83980a419f:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:36:36,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-10T15:36:36,579 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:36:36,584 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 141912 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:36:36,584 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 94b551ecd4747174537fcd83980a419f/A is initiating minor compaction (all files) 2024-12-10T15:36:36,584 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 94b551ecd4747174537fcd83980a419f/A in TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:36,584 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/83ea3f03ed964ac6b1ee5d063ba4f4ac, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/fc9dd20615df43b49e08a03bf06c2c78, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/5e93c3ce3f8c451dbd9a452149e6b693, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/f38e9961fd5e4bfba0529c43d9c06572] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp, totalSize=138.6 K 2024-12-10T15:36:36,585 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:36,585 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
files: [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/83ea3f03ed964ac6b1ee5d063ba4f4ac, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/fc9dd20615df43b49e08a03bf06c2c78, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/5e93c3ce3f8c451dbd9a452149e6b693, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/f38e9961fd5e4bfba0529c43d9c06572] 2024-12-10T15:36:36,586 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48812 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:36:36,586 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 83ea3f03ed964ac6b1ee5d063ba4f4ac, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733844992408 2024-12-10T15:36:36,586 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 94b551ecd4747174537fcd83980a419f/B is initiating minor compaction (all files) 2024-12-10T15:36:36,586 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 94b551ecd4747174537fcd83980a419f/B in TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:36,586 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/88667b6cb33540d7b976f21e690d22f7, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/567e877889994ac48d0d985f043cecf9, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/296398fbc52a4f588a649430a6d151fe, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/0e9f7c6d0f86476098f0b07884a17f88] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp, totalSize=47.7 K 2024-12-10T15:36:36,586 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting fc9dd20615df43b49e08a03bf06c2c78, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1733844993597 2024-12-10T15:36:36,586 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 88667b6cb33540d7b976f21e690d22f7, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733844992408 2024-12-10T15:36:36,587 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e93c3ce3f8c451dbd9a452149e6b693, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1733844993936 2024-12-10T15:36:36,588 
DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 567e877889994ac48d0d985f043cecf9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1733844993597 2024-12-10T15:36:36,589 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting f38e9961fd5e4bfba0529c43d9c06572, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1733844995176 2024-12-10T15:36:36,589 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 296398fbc52a4f588a649430a6d151fe, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1733844993936 2024-12-10T15:36:36,590 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0e9f7c6d0f86476098f0b07884a17f88, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1733844995176 2024-12-10T15:36:36,614 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 94b551ecd4747174537fcd83980a419f#B#compaction#135 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:36,615 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/412268f21ba941859580c20d035b0805 is 50, key is test_row_0/B:col10/1733844995176/Put/seqid=0 2024-12-10T15:36:36,623 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:36,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741990_1166 (size=12595) 2024-12-10T15:36:36,656 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412104efb651d37754a5797d647dff4cb2748_94b551ecd4747174537fcd83980a419f store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:36,659 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412104efb651d37754a5797d647dff4cb2748_94b551ecd4747174537fcd83980a419f, store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:36,659 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412104efb651d37754a5797d647dff4cb2748_94b551ecd4747174537fcd83980a419f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:36,659 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:36,660 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-10T15:36:36,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:36,660 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2837): Flushing 94b551ecd4747174537fcd83980a419f 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-10T15:36:36,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=A 2024-12-10T15:36:36,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:36,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=B 2024-12-10T15:36:36,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:36,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=C 2024-12-10T15:36:36,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:36,663 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/412268f21ba941859580c20d035b0805 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/412268f21ba941859580c20d035b0805 2024-12-10T15:36:36,673 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 94b551ecd4747174537fcd83980a419f/B of 94b551ecd4747174537fcd83980a419f into 412268f21ba941859580c20d035b0805(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:36:36,673 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:36,673 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f., storeName=94b551ecd4747174537fcd83980a419f/B, priority=12, startTime=1733844996579; duration=0sec 2024-12-10T15:36:36,673 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:36,673 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 94b551ecd4747174537fcd83980a419f:B 2024-12-10T15:36:36,673 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:36:36,674 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48812 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:36:36,675 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 94b551ecd4747174537fcd83980a419f/C is initiating minor compaction (all files) 2024-12-10T15:36:36,675 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 94b551ecd4747174537fcd83980a419f/C in TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:36,675 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/6e1fcd987c3b46d39da9fb3e93d69b0f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/243dee43af58445790bb09707ce7e36d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/caeb8d9a5fda4c11b769ccd554a487d2, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/94eb0e5bfb1447aa8151e199c996081d] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp, totalSize=47.7 K 2024-12-10T15:36:36,675 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6e1fcd987c3b46d39da9fb3e93d69b0f, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733844992408 2024-12-10T15:36:36,677 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 243dee43af58445790bb09707ce7e36d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1733844993597 2024-12-10T15:36:36,678 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting caeb8d9a5fda4c11b769ccd554a487d2, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1733844993936 2024-12-10T15:36:36,678 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 94eb0e5bfb1447aa8151e199c996081d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1733844995176 2024-12-10T15:36:36,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412104862441ee0c64ef19b7627dbb3f67fd8_94b551ecd4747174537fcd83980a419f is 50, key is test_row_0/A:col10/1733844995352/Put/seqid=0 2024-12-10T15:36:36,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741991_1167 (size=4469) 2024-12-10T15:36:36,717 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 94b551ecd4747174537fcd83980a419f#C#compaction#138 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:36,718 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/2517c7e4d61c4967a6069a985a1c49d0 is 50, key is test_row_0/C:col10/1733844995176/Put/seqid=0 2024-12-10T15:36:36,719 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 94b551ecd4747174537fcd83980a419f#A#compaction#136 average throughput is 0.25 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:36,720 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/0857035d820d48f6907d3a080ddbd4ba is 175, key is test_row_0/A:col10/1733844995176/Put/seqid=0 2024-12-10T15:36:36,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741994_1170 (size=31549) 2024-12-10T15:36:36,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741992_1168 (size=12304) 2024-12-10T15:36:36,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:36,784 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412104862441ee0c64ef19b7627dbb3f67fd8_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412104862441ee0c64ef19b7627dbb3f67fd8_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:36,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/febfde6d8f384c1d95fff4b41dddf8e1, store: [table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:36,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/febfde6d8f384c1d95fff4b41dddf8e1 is 175, key is test_row_0/A:col10/1733844995352/Put/seqid=0 2024-12-10T15:36:36,794 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/0857035d820d48f6907d3a080ddbd4ba as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/0857035d820d48f6907d3a080ddbd4ba 2024-12-10T15:36:36,800 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 94b551ecd4747174537fcd83980a419f/A of 94b551ecd4747174537fcd83980a419f into 0857035d820d48f6907d3a080ddbd4ba(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:36:36,800 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:36,800 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f., storeName=94b551ecd4747174537fcd83980a419f/A, priority=12, startTime=1733844996579; duration=0sec 2024-12-10T15:36:36,800 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:36,800 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 94b551ecd4747174537fcd83980a419f:A 2024-12-10T15:36:36,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741995_1171 (size=31105) 2024-12-10T15:36:36,817 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=209, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/febfde6d8f384c1d95fff4b41dddf8e1 2024-12-10T15:36:36,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741993_1169 (size=12595) 2024-12-10T15:36:36,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/71072896258d4d29bf0831a3215d0e21 is 50, key is test_row_0/B:col10/1733844995352/Put/seqid=0 2024-12-10T15:36:36,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741996_1172 (size=12151) 2024-12-10T15:36:37,227 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/2517c7e4d61c4967a6069a985a1c49d0 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/2517c7e4d61c4967a6069a985a1c49d0 2024-12-10T15:36:37,233 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 94b551ecd4747174537fcd83980a419f/C of 94b551ecd4747174537fcd83980a419f into 2517c7e4d61c4967a6069a985a1c49d0(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:36:37,233 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:37,233 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f., storeName=94b551ecd4747174537fcd83980a419f/C, priority=12, startTime=1733844996579; duration=0sec 2024-12-10T15:36:37,233 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:37,233 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 94b551ecd4747174537fcd83980a419f:C 2024-12-10T15:36:37,294 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/71072896258d4d29bf0831a3215d0e21 2024-12-10T15:36:37,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/aec8bec58ec542f1a2b5476dc08f7a87 is 50, key is test_row_0/C:col10/1733844995352/Put/seqid=0 2024-12-10T15:36:37,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741997_1173 (size=12151) 2024-12-10T15:36:37,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-10T15:36:37,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:37,511 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:37,533 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:37,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845057524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:37,534 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:37,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845057531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:37,535 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:37,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845057531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:37,536 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:37,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845057533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:37,536 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:37,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845057533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:37,640 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:37,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845057640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:37,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:37,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845057642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:37,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:37,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845057642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:37,648 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:37,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845057647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:37,758 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/aec8bec58ec542f1a2b5476dc08f7a87 2024-12-10T15:36:37,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/febfde6d8f384c1d95fff4b41dddf8e1 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/febfde6d8f384c1d95fff4b41dddf8e1 2024-12-10T15:36:37,772 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/febfde6d8f384c1d95fff4b41dddf8e1, entries=150, sequenceid=209, filesize=30.4 K 2024-12-10T15:36:37,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/71072896258d4d29bf0831a3215d0e21 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/71072896258d4d29bf0831a3215d0e21 2024-12-10T15:36:37,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,782 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/71072896258d4d29bf0831a3215d0e21, entries=150, sequenceid=209, filesize=11.9 K 2024-12-10T15:36:37,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/aec8bec58ec542f1a2b5476dc08f7a87 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/aec8bec58ec542f1a2b5476dc08f7a87 2024-12-10T15:36:37,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T15:36:37,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T15:36:37,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T15:36:37,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T15:36:37,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,815 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/aec8bec58ec542f1a2b5476dc08f7a87, entries=150, sequenceid=209, filesize=11.9 K 2024-12-10T15:36:37,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,816 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 94b551ecd4747174537fcd83980a419f in 1156ms, sequenceid=209, compaction requested=false 2024-12-10T15:36:37,816 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2538): Flush status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:37,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:37,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=46 2024-12-10T15:36:37,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=46 2024-12-10T15:36:37,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,820 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-12-10T15:36:37,820 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5570 sec 2024-12-10T15:36:37,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,823 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees in 2.5640 sec 2024-12-10T15:36:37,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,846 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 94b551ecd4747174537fcd83980a419f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T15:36:37,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=A 2024-12-10T15:36:37,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:37,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=B 2024-12-10T15:36:37,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:37,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=C 2024-12-10T15:36:37,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:37,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:37,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,877 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210944834097b9b46c3ab23226a0aa79193_94b551ecd4747174537fcd83980a419f is 50, key is test_row_0/A:col10/1733844997522/Put/seqid=0 2024-12-10T15:36:37,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,909 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:37,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845057902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:37,909 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:37,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845057903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:37,912 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:37,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845057905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:37,913 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:37,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845057905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:37,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741998_1174 (size=12304) 2024-12-10T15:36:37,933 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:37,944 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210944834097b9b46c3ab23226a0aa79193_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210944834097b9b46c3ab23226a0aa79193_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:37,947 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/1c1143ac18bc4467a55a1037a7183a14, store: [table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:37,948 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/1c1143ac18bc4467a55a1037a7183a14 is 175, key is test_row_0/A:col10/1733844997522/Put/seqid=0 2024-12-10T15:36:37,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741999_1175 (size=31101) 2024-12-10T15:36:38,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:38,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845058010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:38,014 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:38,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845058012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:38,014 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:38,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845058013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:38,015 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:38,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845058015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:38,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:38,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845058214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:38,221 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:38,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845058216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:38,287 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:38,287 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:38,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845058216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:38,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845058216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:38,380 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=223, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/1c1143ac18bc4467a55a1037a7183a14 2024-12-10T15:36:38,397 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/43d51662a8f4449cb2d539a08b77a865 is 50, key is test_row_0/B:col10/1733844997522/Put/seqid=0 2024-12-10T15:36:38,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742000_1176 (size=9757) 2024-12-10T15:36:38,436 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=223 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/43d51662a8f4449cb2d539a08b77a865 2024-12-10T15:36:38,465 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/381a85df9cef41dab9fc6ff7a7de5289 is 50, key is test_row_0/C:col10/1733844997522/Put/seqid=0 2024-12-10T15:36:38,524 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:38,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845058524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:38,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742001_1177 (size=9757) 2024-12-10T15:36:38,533 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=223 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/381a85df9cef41dab9fc6ff7a7de5289 2024-12-10T15:36:38,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/1c1143ac18bc4467a55a1037a7183a14 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/1c1143ac18bc4467a55a1037a7183a14 2024-12-10T15:36:38,593 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:38,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845058590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:38,594 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:38,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845058592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:38,596 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:38,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845058595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:38,600 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/1c1143ac18bc4467a55a1037a7183a14, entries=150, sequenceid=223, filesize=30.4 K 2024-12-10T15:36:38,604 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/43d51662a8f4449cb2d539a08b77a865 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/43d51662a8f4449cb2d539a08b77a865 2024-12-10T15:36:38,613 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/43d51662a8f4449cb2d539a08b77a865, entries=100, sequenceid=223, filesize=9.5 K 2024-12-10T15:36:38,615 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/381a85df9cef41dab9fc6ff7a7de5289 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/381a85df9cef41dab9fc6ff7a7de5289 2024-12-10T15:36:38,626 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/381a85df9cef41dab9fc6ff7a7de5289, entries=100, sequenceid=223, filesize=9.5 K 2024-12-10T15:36:38,643 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 94b551ecd4747174537fcd83980a419f in 797ms, sequenceid=223, 
compaction requested=true 2024-12-10T15:36:38,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:38,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 94b551ecd4747174537fcd83980a419f:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:36:38,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:38,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 94b551ecd4747174537fcd83980a419f:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:36:38,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-10T15:36:38,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 94b551ecd4747174537fcd83980a419f:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:36:38,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-10T15:36:38,644 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:38,645 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:38,645 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 94b551ecd4747174537fcd83980a419f/A is initiating minor compaction (all files) 2024-12-10T15:36:38,646 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 94b551ecd4747174537fcd83980a419f/A in TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
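The repeated RegionTooBusyException records above are the region server's write flow control: HRegion.checkResources rejects mutations while the region's memstore sits above its blocking limit (here 512.0 K), and the rejections stop once MemStoreFlusher finishes the flush logged at 15:36:38,643. As a minimal sketch only (the table name, row key, retry count, and backoff values are illustrative and not taken from this test run, and the stock HBase client already performs this kind of retry internally before surfacing the error), a caller hitting that limit could back off and retry like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionTooBusyRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The blocking limit seen in the log ("Over memstore limit=512.0 K") is the product of
    // the per-region flush size and the block multiplier; these are the relevant keys.
    // The values used by this test harness are not shown in the log, so none are set here.
    //   hbase.hregion.memstore.flush.size
    //   hbase.hregion.memstore.block.multiplier
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                       // illustrative starting backoff
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break;                                  // write accepted
        } catch (RegionTooBusyException e) {
          // Region is over its memstore blocking limit; wait for the in-flight flush
          // to drain the memstore, then retry with exponential backoff.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}

The rejections in the log clear on exactly that pattern: once the flush commits files for stores A, B, and C and frees ~53.67 KB of memstore, subsequent mutations are admitted again.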
2024-12-10T15:36:38,646 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/0857035d820d48f6907d3a080ddbd4ba, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/febfde6d8f384c1d95fff4b41dddf8e1, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/1c1143ac18bc4467a55a1037a7183a14] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp, totalSize=91.6 K 2024-12-10T15:36:38,646 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:38,646 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. files: [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/0857035d820d48f6907d3a080ddbd4ba, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/febfde6d8f384c1d95fff4b41dddf8e1, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/1c1143ac18bc4467a55a1037a7183a14] 2024-12-10T15:36:38,647 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 0857035d820d48f6907d3a080ddbd4ba, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1733844995176 2024-12-10T15:36:38,647 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting febfde6d8f384c1d95fff4b41dddf8e1, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1733844995352 2024-12-10T15:36:38,648 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c1143ac18bc4467a55a1037a7183a14, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1733844997522 2024-12-10T15:36:38,651 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:38,654 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34503 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:38,654 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 94b551ecd4747174537fcd83980a419f/C is initiating minor compaction (all files) 2024-12-10T15:36:38,654 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] 
regionserver.HRegion(2351): Starting compaction of 94b551ecd4747174537fcd83980a419f/C in TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:38,654 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/2517c7e4d61c4967a6069a985a1c49d0, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/aec8bec58ec542f1a2b5476dc08f7a87, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/381a85df9cef41dab9fc6ff7a7de5289] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp, totalSize=33.7 K 2024-12-10T15:36:38,655 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2517c7e4d61c4967a6069a985a1c49d0, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1733844995176 2024-12-10T15:36:38,656 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting aec8bec58ec542f1a2b5476dc08f7a87, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1733844995352 2024-12-10T15:36:38,657 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 381a85df9cef41dab9fc6ff7a7de5289, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1733844997522 2024-12-10T15:36:38,658 INFO [master/bf0fec90ff6d:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-10T15:36:38,658 INFO [master/bf0fec90ff6d:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-10T15:36:38,664 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:38,669 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 94b551ecd4747174537fcd83980a419f#C#compaction#145 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:38,669 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/89f51b9d0944452eb1f8935f072fc027 is 50, key is test_row_0/C:col10/1733844997522/Put/seqid=0 2024-12-10T15:36:38,676 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210ccae8bee603c44919fa12f0add31c8be_94b551ecd4747174537fcd83980a419f store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:38,678 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210ccae8bee603c44919fa12f0add31c8be_94b551ecd4747174537fcd83980a419f, store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:38,678 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210ccae8bee603c44919fa12f0add31c8be_94b551ecd4747174537fcd83980a419f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:38,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742002_1178 (size=12697) 2024-12-10T15:36:38,731 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/89f51b9d0944452eb1f8935f072fc027 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/89f51b9d0944452eb1f8935f072fc027 2024-12-10T15:36:38,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742003_1179 (size=4469) 2024-12-10T15:36:38,741 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 94b551ecd4747174537fcd83980a419f#A#compaction#144 average throughput is 0.32 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:38,741 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/7af34ba65dd24a2d94270e11430d2a50 is 175, key is test_row_0/A:col10/1733844997522/Put/seqid=0 2024-12-10T15:36:38,745 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 94b551ecd4747174537fcd83980a419f/C of 94b551ecd4747174537fcd83980a419f into 89f51b9d0944452eb1f8935f072fc027(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
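The "Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" and ExploringCompactionPolicy lines above are driven by a handful of store-level settings. As a hedged sketch for orientation (the key names are standard HBase configuration keys, but the values below are stock defaults, not values read from this test's configuration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static Configuration compactionDefaults() {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible HFiles before a minor compaction is selected
    // (older name: hbase.hstore.compactionThreshold). The flush above left stores
    // A, B, and C with three files each, so all three were picked up.
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Upper bound on the number of files compacted in a single pass.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // The "16 blocking" figure in SortedCompactionPolicy's log line: updates to the
    // store are blocked once it accumulates this many HFiles.
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    return conf;
  }
}

Within those bounds, the ExploringCompactionPolicy line ("selected 3 files of size 93755 ... after considering 1 permutations with 1 in ratio") records the policy picking, among the eligible file sets, the candidate that satisfies the configured size ratio, after which the throughput controller caps the compaction's read/write rate (50.00 MB/second in this run).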
2024-12-10T15:36:38,745 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:38,745 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f., storeName=94b551ecd4747174537fcd83980a419f/C, priority=13, startTime=1733844998644; duration=0sec 2024-12-10T15:36:38,745 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:38,745 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 94b551ecd4747174537fcd83980a419f:C 2024-12-10T15:36:38,745 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:38,755 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34503 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:38,755 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 94b551ecd4747174537fcd83980a419f/B is initiating minor compaction (all files) 2024-12-10T15:36:38,755 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 94b551ecd4747174537fcd83980a419f/B in TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:38,755 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/412268f21ba941859580c20d035b0805, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/71072896258d4d29bf0831a3215d0e21, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/43d51662a8f4449cb2d539a08b77a865] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp, totalSize=33.7 K 2024-12-10T15:36:38,759 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 412268f21ba941859580c20d035b0805, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=183, earliestPutTs=1733844995176 2024-12-10T15:36:38,760 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 71072896258d4d29bf0831a3215d0e21, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1733844995352 2024-12-10T15:36:38,760 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 43d51662a8f4449cb2d539a08b77a865, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1733844997522 2024-12-10T15:36:38,772 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 94b551ecd4747174537fcd83980a419f#B#compaction#146 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:38,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742004_1180 (size=31758) 2024-12-10T15:36:38,772 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/ed89e24aee334ad99e8f3a1fb55a6b2f is 50, key is test_row_0/B:col10/1733844997522/Put/seqid=0 2024-12-10T15:36:38,781 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/7af34ba65dd24a2d94270e11430d2a50 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/7af34ba65dd24a2d94270e11430d2a50 2024-12-10T15:36:38,788 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 94b551ecd4747174537fcd83980a419f/A of 94b551ecd4747174537fcd83980a419f into 7af34ba65dd24a2d94270e11430d2a50(size=31.0 K), total size for store is 31.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:36:38,788 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:38,788 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f., storeName=94b551ecd4747174537fcd83980a419f/A, priority=13, startTime=1733844998643; duration=0sec 2024-12-10T15:36:38,788 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:38,788 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 94b551ecd4747174537fcd83980a419f:A 2024-12-10T15:36:38,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742005_1181 (size=12697) 2024-12-10T15:36:38,815 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/ed89e24aee334ad99e8f3a1fb55a6b2f as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/ed89e24aee334ad99e8f3a1fb55a6b2f 2024-12-10T15:36:38,827 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 94b551ecd4747174537fcd83980a419f/B of 94b551ecd4747174537fcd83980a419f into ed89e24aee334ad99e8f3a1fb55a6b2f(size=12.4 K), total size for store is 12.4 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:36:38,827 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:38,827 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f., storeName=94b551ecd4747174537fcd83980a419f/B, priority=13, startTime=1733844998644; duration=0sec 2024-12-10T15:36:38,827 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:38,827 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 94b551ecd4747174537fcd83980a419f:B 2024-12-10T15:36:39,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:39,040 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 94b551ecd4747174537fcd83980a419f 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-10T15:36:39,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=A 2024-12-10T15:36:39,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:39,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=B 2024-12-10T15:36:39,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:39,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=C 2024-12-10T15:36:39,040 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:39,060 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210b610132769354b0c86866c010bb6d5be_94b551ecd4747174537fcd83980a419f is 50, key is test_row_0/A:col10/1733844999028/Put/seqid=0 2024-12-10T15:36:39,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742006_1182 (size=14794) 2024-12-10T15:36:39,090 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:39,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845059086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:39,101 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:39,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845059097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:39,103 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:39,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845059100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:39,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:39,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845059107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:39,198 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:39,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845059197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:39,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-10T15:36:39,369 INFO [Thread-585 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 45 completed 2024-12-10T15:36:39,371 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:36:39,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-12-10T15:36:39,374 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:36:39,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-10T15:36:39,375 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:36:39,375 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:36:39,400 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:39,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845059399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:39,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-10T15:36:39,489 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:39,500 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210b610132769354b0c86866c010bb6d5be_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210b610132769354b0c86866c010bb6d5be_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:39,501 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/05965da93cbd49a6b80d3c910b753cd5, store: [table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:39,502 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/05965da93cbd49a6b80d3c910b753cd5 is 175, key is test_row_0/A:col10/1733844999028/Put/seqid=0 2024-12-10T15:36:39,528 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection 
to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:39,529 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-10T15:36:39,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:39,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:39,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:39,529 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:39,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:39,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:39,543 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:39,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845059540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:39,552 DEBUG [Thread-575 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4191 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f., hostname=bf0fec90ff6d,46239,1733844953049, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T15:36:39,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742007_1183 (size=39749) 2024-12-10T15:36:39,556 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=253, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/05965da93cbd49a6b80d3c910b753cd5 2024-12-10T15:36:39,576 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/86061e04355f4826b16b85fe0dacbb72 is 50, key is test_row_0/B:col10/1733844999028/Put/seqid=0 2024-12-10T15:36:39,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742008_1184 (size=12151) 2024-12-10T15:36:39,622 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/86061e04355f4826b16b85fe0dacbb72 2024-12-10T15:36:39,637 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/13ceb6adee3c4b8ab5a7bcf2598d601a is 50, key is test_row_0/C:col10/1733844999028/Put/seqid=0 2024-12-10T15:36:39,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-10T15:36:39,687 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:39,687 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-10T15:36:39,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:39,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:39,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:39,691 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:39,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:39,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:39,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742009_1185 (size=12151) 2024-12-10T15:36:39,700 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/13ceb6adee3c4b8ab5a7bcf2598d601a 2024-12-10T15:36:39,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:39,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845059704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:39,734 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/05965da93cbd49a6b80d3c910b753cd5 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/05965da93cbd49a6b80d3c910b753cd5 2024-12-10T15:36:39,748 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/05965da93cbd49a6b80d3c910b753cd5, entries=200, sequenceid=253, filesize=38.8 K 
2024-12-10T15:36:39,751 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/86061e04355f4826b16b85fe0dacbb72 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/86061e04355f4826b16b85fe0dacbb72 2024-12-10T15:36:39,758 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/86061e04355f4826b16b85fe0dacbb72, entries=150, sequenceid=253, filesize=11.9 K 2024-12-10T15:36:39,767 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/13ceb6adee3c4b8ab5a7bcf2598d601a as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/13ceb6adee3c4b8ab5a7bcf2598d601a 2024-12-10T15:36:39,780 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/13ceb6adee3c4b8ab5a7bcf2598d601a, entries=150, sequenceid=253, filesize=11.9 K 2024-12-10T15:36:39,782 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 94b551ecd4747174537fcd83980a419f in 743ms, sequenceid=253, compaction requested=false 2024-12-10T15:36:39,782 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:39,844 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:39,845 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-10T15:36:39,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
2024-12-10T15:36:39,845 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 94b551ecd4747174537fcd83980a419f 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-10T15:36:39,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=A 2024-12-10T15:36:39,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:39,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=B 2024-12-10T15:36:39,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:39,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=C 2024-12-10T15:36:39,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:39,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412108bfe4883e9d54aeba73652ff9d7a1da3_94b551ecd4747174537fcd83980a419f is 50, key is test_row_0/A:col10/1733844999075/Put/seqid=0 2024-12-10T15:36:39,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742010_1186 (size=12454) 2024-12-10T15:36:39,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:39,941 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412108bfe4883e9d54aeba73652ff9d7a1da3_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108bfe4883e9d54aeba73652ff9d7a1da3_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:39,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/80d4551888934e94a9193ea105d2d00e, store: [table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:39,942 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/80d4551888934e94a9193ea105d2d00e is 175, key is test_row_0/A:col10/1733844999075/Put/seqid=0 2024-12-10T15:36:39,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742011_1187 (size=31255) 2024-12-10T15:36:39,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-10T15:36:40,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:40,109 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:40,139 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:40,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845060137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:40,139 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:40,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845060138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:40,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:40,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845060140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:40,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:40,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845060215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:40,242 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:40,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845060240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:40,243 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:40,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845060240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:40,245 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:40,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845060243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:40,356 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=262, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/80d4551888934e94a9193ea105d2d00e 2024-12-10T15:36:40,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/aa2f323696534a67b8f55172cefd4980 is 50, key is test_row_0/B:col10/1733844999075/Put/seqid=0 2024-12-10T15:36:40,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742012_1188 (size=12301) 2024-12-10T15:36:40,412 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/aa2f323696534a67b8f55172cefd4980 2024-12-10T15:36:40,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/707ad748debd4aa58011df173fb3ce76 is 50, key is test_row_0/C:col10/1733844999075/Put/seqid=0 2024-12-10T15:36:40,444 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:40,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845060444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:40,446 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:40,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845060444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:40,448 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:40,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845060447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:40,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742013_1189 (size=12301) 2024-12-10T15:36:40,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-10T15:36:40,750 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:40,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845060746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:40,751 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:40,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845060750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:40,752 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:40,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845060750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:40,860 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/707ad748debd4aa58011df173fb3ce76 2024-12-10T15:36:40,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/80d4551888934e94a9193ea105d2d00e as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/80d4551888934e94a9193ea105d2d00e 2024-12-10T15:36:40,874 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/80d4551888934e94a9193ea105d2d00e, entries=150, sequenceid=262, filesize=30.5 K 2024-12-10T15:36:40,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/aa2f323696534a67b8f55172cefd4980 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/aa2f323696534a67b8f55172cefd4980 2024-12-10T15:36:40,881 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/aa2f323696534a67b8f55172cefd4980, entries=150, sequenceid=262, filesize=12.0 K 2024-12-10T15:36:40,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/707ad748debd4aa58011df173fb3ce76 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/707ad748debd4aa58011df173fb3ce76 2024-12-10T15:36:40,889 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/707ad748debd4aa58011df173fb3ce76, entries=150, sequenceid=262, filesize=12.0 K 2024-12-10T15:36:40,890 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 94b551ecd4747174537fcd83980a419f in 1045ms, sequenceid=262, compaction requested=true 2024-12-10T15:36:40,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:40,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:40,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-12-10T15:36:40,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-12-10T15:36:40,892 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-12-10T15:36:40,892 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5160 sec 2024-12-10T15:36:40,895 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 1.5220 sec 2024-12-10T15:36:41,230 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 94b551ecd4747174537fcd83980a419f 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-10T15:36:41,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=A 2024-12-10T15:36:41,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:41,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=B 2024-12-10T15:36:41,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:41,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=C 2024-12-10T15:36:41,230 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-12-10T15:36:41,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:41,257 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210957406d25c344e028bb518aac838788d_94b551ecd4747174537fcd83980a419f is 50, key is test_row_0/A:col10/1733845001228/Put/seqid=0 2024-12-10T15:36:41,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:41,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845061256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:41,259 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:41,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845061256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:41,259 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:41,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845061257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:41,260 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:41,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845061258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:41,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742014_1190 (size=14994) 2024-12-10T15:36:41,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:41,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845061362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:41,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-10T15:36:41,481 INFO [Thread-585 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-12-10T15:36:41,486 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:36:41,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-12-10T15:36:41,489 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:36:41,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-10T15:36:41,490 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:36:41,490 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:36:41,574 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:41,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845061572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:41,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-10T15:36:41,644 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:41,647 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-10T15:36:41,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:41,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:41,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:41,648 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:41,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:41,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:41,711 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:41,733 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210957406d25c344e028bb518aac838788d_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210957406d25c344e028bb518aac838788d_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:41,735 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/adc141272ff74311aa31536b6dbf1322, store: [table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:41,736 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/adc141272ff74311aa31536b6dbf1322 is 175, key is test_row_0/A:col10/1733845001228/Put/seqid=0 2024-12-10T15:36:41,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742015_1191 (size=39949) 2024-12-10T15:36:41,780 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=290, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/adc141272ff74311aa31536b6dbf1322 2024-12-10T15:36:41,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-10T15:36:41,802 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:41,807 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-10T15:36:41,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:41,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
as already flushing 2024-12-10T15:36:41,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:41,808 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:41,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:41,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:41,830 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/04d3543e489b44c5983265a877425509 is 50, key is test_row_0/B:col10/1733845001228/Put/seqid=0 2024-12-10T15:36:41,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742016_1192 (size=12301) 2024-12-10T15:36:41,887 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:41,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845061883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:41,972 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:41,975 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-10T15:36:41,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:41,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:41,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:41,975 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:41,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:41,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:42,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-10T15:36:42,128 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:42,131 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-10T15:36:42,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:42,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:42,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:42,132 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:42,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:42,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:42,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:42,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845062267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:42,274 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:42,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845062271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:42,277 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:42,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845062276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:42,284 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:42,287 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-10T15:36:42,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
2024-12-10T15:36:42,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:42,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:42,288 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/04d3543e489b44c5983265a877425509 2024-12-10T15:36:42,288 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:42,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:42,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:42,306 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/dafcb760e05a432093d19fef30f97f73 is 50, key is test_row_0/C:col10/1733845001228/Put/seqid=0
2024-12-10T15:36:42,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742017_1193 (size=12301)
2024-12-10T15:36:42,347 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/dafcb760e05a432093d19fef30f97f73
2024-12-10T15:36:42,356 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/adc141272ff74311aa31536b6dbf1322 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/adc141272ff74311aa31536b6dbf1322
2024-12-10T15:36:42,362 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/adc141272ff74311aa31536b6dbf1322, entries=200, sequenceid=290, filesize=39.0 K
2024-12-10T15:36:42,363 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/04d3543e489b44c5983265a877425509 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/04d3543e489b44c5983265a877425509
2024-12-10T15:36:42,369 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/04d3543e489b44c5983265a877425509, entries=150, sequenceid=290, filesize=12.0 K
2024-12-10T15:36:42,370 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/dafcb760e05a432093d19fef30f97f73 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/dafcb760e05a432093d19fef30f97f73
2024-12-10T15:36:42,382 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/dafcb760e05a432093d19fef30f97f73, entries=150, sequenceid=290, filesize=12.0 K
2024-12-10T15:36:42,383 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 94b551ecd4747174537fcd83980a419f in 1153ms, sequenceid=290, compaction requested=true
2024-12-10T15:36:42,383 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 94b551ecd4747174537fcd83980a419f:
2024-12-10T15:36:42,383 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking
2024-12-10T15:36:42,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 94b551ecd4747174537fcd83980a419f:A, priority=-2147483648, current under compaction store size is 1
2024-12-10T15:36:42,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-10T15:36:42,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 94b551ecd4747174537fcd83980a419f:B, priority=-2147483648, current under compaction store size is 2
2024-12-10T15:36:42,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-10T15:36:42,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 94b551ecd4747174537fcd83980a419f:C, priority=-2147483648, current under compaction store size is 3
2024-12-10T15:36:42,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0
2024-12-10T15:36:42,383 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking
2024-12-10T15:36:42,385 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 142711 starting at candidate #0 after considering 3 permutations with 3 in ratio
2024-12-10T15:36:42,385 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 94b551ecd4747174537fcd83980a419f/A is initiating minor compaction (all files)
2024-12-10T15:36:42,385 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 94b551ecd4747174537fcd83980a419f/A in TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.
2024-12-10T15:36:42,385 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/7af34ba65dd24a2d94270e11430d2a50, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/05965da93cbd49a6b80d3c910b753cd5, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/80d4551888934e94a9193ea105d2d00e, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/adc141272ff74311aa31536b6dbf1322] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp, totalSize=139.4 K 2024-12-10T15:36:42,385 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:42,385 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. files: [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/7af34ba65dd24a2d94270e11430d2a50, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/05965da93cbd49a6b80d3c910b753cd5, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/80d4551888934e94a9193ea105d2d00e, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/adc141272ff74311aa31536b6dbf1322] 2024-12-10T15:36:42,385 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:36:42,385 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 94b551ecd4747174537fcd83980a419f/B is initiating minor compaction (all files) 2024-12-10T15:36:42,385 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 94b551ecd4747174537fcd83980a419f/B in TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
2024-12-10T15:36:42,386 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/ed89e24aee334ad99e8f3a1fb55a6b2f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/86061e04355f4826b16b85fe0dacbb72, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/aa2f323696534a67b8f55172cefd4980, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/04d3543e489b44c5983265a877425509] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp, totalSize=48.3 K 2024-12-10T15:36:42,386 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting ed89e24aee334ad99e8f3a1fb55a6b2f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1733844995354 2024-12-10T15:36:42,386 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 86061e04355f4826b16b85fe0dacbb72, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733844997893 2024-12-10T15:36:42,386 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting aa2f323696534a67b8f55172cefd4980, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1733844999062 2024-12-10T15:36:42,387 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 04d3543e489b44c5983265a877425509, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733845000128 2024-12-10T15:36:42,389 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7af34ba65dd24a2d94270e11430d2a50, keycount=150, bloomtype=ROW, size=31.0 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1733844995354 2024-12-10T15:36:42,392 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 05965da93cbd49a6b80d3c910b753cd5, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733844997893 2024-12-10T15:36:42,393 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 80d4551888934e94a9193ea105d2d00e, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1733844999062 2024-12-10T15:36:42,394 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting adc141272ff74311aa31536b6dbf1322, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733845000128 2024-12-10T15:36:42,397 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 94b551ecd4747174537fcd83980a419f#B#compaction#156 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:42,398 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/f2abc4bd189643329cd81d48654b9616 is 50, key is test_row_0/B:col10/1733845001228/Put/seqid=0 2024-12-10T15:36:42,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:42,402 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 94b551ecd4747174537fcd83980a419f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T15:36:42,402 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=A 2024-12-10T15:36:42,403 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:42,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=B 2024-12-10T15:36:42,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:42,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=C 2024-12-10T15:36:42,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:42,428 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:42,437 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412109292ad780c994a56a1cb0bca89c0097a_94b551ecd4747174537fcd83980a419f is 50, key is test_row_0/A:col10/1733845001255/Put/seqid=0 2024-12-10T15:36:42,440 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:42,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742018_1194 (size=12983) 2024-12-10T15:36:42,443 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-10T15:36:42,443 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210f3f6a266b27442deb1acdeb12087cbc7_94b551ecd4747174537fcd83980a419f store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:42,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
2024-12-10T15:36:42,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:42,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:42,444 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:42,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:42,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:42,450 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210f3f6a266b27442deb1acdeb12087cbc7_94b551ecd4747174537fcd83980a419f, store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:42,450 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210f3f6a266b27442deb1acdeb12087cbc7_94b551ecd4747174537fcd83980a419f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:42,453 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/f2abc4bd189643329cd81d48654b9616 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/f2abc4bd189643329cd81d48654b9616 2024-12-10T15:36:42,463 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 94b551ecd4747174537fcd83980a419f/B of 94b551ecd4747174537fcd83980a419f into f2abc4bd189643329cd81d48654b9616(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:36:42,463 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:42,463 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f., storeName=94b551ecd4747174537fcd83980a419f/B, priority=12, startTime=1733845002383; duration=0sec 2024-12-10T15:36:42,463 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:42,463 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 94b551ecd4747174537fcd83980a419f:B 2024-12-10T15:36:42,463 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:36:42,465 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:36:42,465 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 94b551ecd4747174537fcd83980a419f/C is initiating minor compaction (all files) 2024-12-10T15:36:42,465 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 94b551ecd4747174537fcd83980a419f/C in TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
2024-12-10T15:36:42,465 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/89f51b9d0944452eb1f8935f072fc027, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/13ceb6adee3c4b8ab5a7bcf2598d601a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/707ad748debd4aa58011df173fb3ce76, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/dafcb760e05a432093d19fef30f97f73] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp, totalSize=48.3 K 2024-12-10T15:36:42,466 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 89f51b9d0944452eb1f8935f072fc027, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1733844995354 2024-12-10T15:36:42,467 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 13ceb6adee3c4b8ab5a7bcf2598d601a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733844997893 2024-12-10T15:36:42,467 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 707ad748debd4aa58011df173fb3ce76, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1733844999062 2024-12-10T15:36:42,468 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting dafcb760e05a432093d19fef30f97f73, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733845000128 2024-12-10T15:36:42,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742019_1195 (size=12454) 2024-12-10T15:36:42,490 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:42,493 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 94b551ecd4747174537fcd83980a419f#C#compaction#159 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:42,494 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/f216178dfaf94430a65595f56d6d6447 is 50, key is test_row_0/C:col10/1733845001228/Put/seqid=0 2024-12-10T15:36:42,499 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412109292ad780c994a56a1cb0bca89c0097a_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412109292ad780c994a56a1cb0bca89c0097a_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:42,501 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/78a1fc7614fe44e694d4daeab4e4dcfa, store: [table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:42,501 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/78a1fc7614fe44e694d4daeab4e4dcfa is 175, key is test_row_0/A:col10/1733845001255/Put/seqid=0 2024-12-10T15:36:42,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742022_1198 (size=31255) 2024-12-10T15:36:42,544 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=301, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/78a1fc7614fe44e694d4daeab4e4dcfa 2024-12-10T15:36:42,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742020_1196 (size=4469) 2024-12-10T15:36:42,550 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 94b551ecd4747174537fcd83980a419f#A#compaction#157 average throughput is 0.20 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:42,551 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/7ed1dd66c7954daaa9dc69eaab34716d is 175, key is test_row_0/A:col10/1733845001228/Put/seqid=0 2024-12-10T15:36:42,617 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/83100efbcc564e03b0e76598dee3e76c is 50, key is test_row_0/B:col10/1733845001255/Put/seqid=0 2024-12-10T15:36:42,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742021_1197 (size=12983) 2024-12-10T15:36:42,620 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:42,621 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-10T15:36:42,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:42,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:42,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:42,622 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:42,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:42,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:36:42,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49
2024-12-10T15:36:42,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T15:36:42,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845062640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049
2024-12-10T15:36:42,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742023_1199 (size=31937)
2024-12-10T15:36:42,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742024_1200 (size=12301)
2024-12-10T15:36:42,665 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/83100efbcc564e03b0e76598dee3e76c
2024-12-10T15:36:42,697 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/6ce2012b728c47bdaea3295837fefcde is 50, key is test_row_0/C:col10/1733845001255/Put/seqid=0
2024-12-10T15:36:42,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742025_1201 (size=12301)
2024-12-10T15:36:42,736 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/6ce2012b728c47bdaea3295837fefcde
2024-12-10T15:36:42,741 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/78a1fc7614fe44e694d4daeab4e4dcfa as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/78a1fc7614fe44e694d4daeab4e4dcfa
2024-12-10T15:36:42,748 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/78a1fc7614fe44e694d4daeab4e4dcfa, entries=150, sequenceid=301, filesize=30.5 K
2024-12-10T15:36:42,749 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/83100efbcc564e03b0e76598dee3e76c as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/83100efbcc564e03b0e76598dee3e76c
2024-12-10T15:36:42,749 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:42,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845062744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:42,755 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/83100efbcc564e03b0e76598dee3e76c, entries=150, sequenceid=301, filesize=12.0 K 2024-12-10T15:36:42,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/6ce2012b728c47bdaea3295837fefcde as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/6ce2012b728c47bdaea3295837fefcde 2024-12-10T15:36:42,765 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/6ce2012b728c47bdaea3295837fefcde, entries=150, sequenceid=301, filesize=12.0 K 2024-12-10T15:36:42,766 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 94b551ecd4747174537fcd83980a419f in 364ms, sequenceid=301, compaction requested=false 2024-12-10T15:36:42,766 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:42,782 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:42,782 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-10T15:36:42,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
2024-12-10T15:36:42,787 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing 94b551ecd4747174537fcd83980a419f 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB
2024-12-10T15:36:42,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=A
2024-12-10T15:36:42,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T15:36:42,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=B
2024-12-10T15:36:42,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T15:36:42,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=C
2024-12-10T15:36:42,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T15:36:42,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210013b2867a05f4d2cb32eaa1235b737fa_94b551ecd4747174537fcd83980a419f is 50, key is test_row_0/A:col10/1733845002622/Put/seqid=0
2024-12-10T15:36:42,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742026_1202 (size=12454)
2024-12-10T15:36:42,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:36:42,862 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210013b2867a05f4d2cb32eaa1235b737fa_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210013b2867a05f4d2cb32eaa1235b737fa_94b551ecd4747174537fcd83980a419f
2024-12-10T15:36:42,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/b99fa4f522b2401090a36300183133ec, store: [table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f]
2024-12-10T15:36:42,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/b99fa4f522b2401090a36300183133ec is 175, key is test_row_0/A:col10/1733845002622/Put/seqid=0
2024-12-10T15:36:42,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742027_1203 (size=31255)
2024-12-10T15:36:42,913 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=327, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/b99fa4f522b2401090a36300183133ec
2024-12-10T15:36:42,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/e06fd3018bb9487c996a00787ff78183 is 50, key is test_row_0/B:col10/1733845002622/Put/seqid=0
2024-12-10T15:36:42,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 94b551ecd4747174537fcd83980a419f
2024-12-10T15:36:42,978 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing
2024-12-10T15:36:42,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742028_1204 (size=12301)
2024-12-10T15:36:42,985 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/e06fd3018bb9487c996a00787ff78183
2024-12-10T15:36:43,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/c4e35ca8f22f467290be5731fa467f1e is 50, key is test_row_0/C:col10/1733845002622/Put/seqid=0
2024-12-10T15:36:43,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:43,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845063047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:43,060 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/f216178dfaf94430a65595f56d6d6447 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/f216178dfaf94430a65595f56d6d6447 2024-12-10T15:36:43,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742029_1205 (size=12301) 2024-12-10T15:36:43,100 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/7ed1dd66c7954daaa9dc69eaab34716d as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/7ed1dd66c7954daaa9dc69eaab34716d 2024-12-10T15:36:43,102 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 94b551ecd4747174537fcd83980a419f/C of 94b551ecd4747174537fcd83980a419f into f216178dfaf94430a65595f56d6d6447(size=12.7 K), total size for store is 24.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:36:43,102 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:43,102 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f., storeName=94b551ecd4747174537fcd83980a419f/C, priority=12, startTime=1733845002383; duration=0sec 2024-12-10T15:36:43,102 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:43,102 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 94b551ecd4747174537fcd83980a419f:C 2024-12-10T15:36:43,129 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 94b551ecd4747174537fcd83980a419f/A of 94b551ecd4747174537fcd83980a419f into 7ed1dd66c7954daaa9dc69eaab34716d(size=31.2 K), total size for store is 61.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:36:43,129 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:43,130 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f., storeName=94b551ecd4747174537fcd83980a419f/A, priority=12, startTime=1733845002383; duration=0sec 2024-12-10T15:36:43,130 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:43,130 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 94b551ecd4747174537fcd83980a419f:A 2024-12-10T15:36:43,154 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:43,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845063150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:43,357 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:43,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845063356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:43,471 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/c4e35ca8f22f467290be5731fa467f1e 2024-12-10T15:36:43,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/b99fa4f522b2401090a36300183133ec as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/b99fa4f522b2401090a36300183133ec 2024-12-10T15:36:43,545 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/b99fa4f522b2401090a36300183133ec, entries=150, sequenceid=327, filesize=30.5 K 2024-12-10T15:36:43,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/e06fd3018bb9487c996a00787ff78183 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/e06fd3018bb9487c996a00787ff78183 2024-12-10T15:36:43,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,574 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/e06fd3018bb9487c996a00787ff78183, entries=150, sequenceid=327, filesize=12.0 K 2024-12-10T15:36:43,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/c4e35ca8f22f467290be5731fa467f1e as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/c4e35ca8f22f467290be5731fa467f1e 2024-12-10T15:36:43,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,589 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/c4e35ca8f22f467290be5731fa467f1e, entries=150, sequenceid=327, filesize=12.0 K 2024-12-10T15:36:43,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,592 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 94b551ecd4747174537fcd83980a419f in 804ms, sequenceid=327, compaction requested=true 2024-12-10T15:36:43,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,592 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:43,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:43,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-12-10T15:36:43,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-12-10T15:36:43,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,602 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-12-10T15:36:43,603 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, 
ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1110 sec 2024-12-10T15:36:43,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,604 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 2.1170 sec 2024-12-10T15:36:43,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-10T15:36:43,631 INFO [Thread-585 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-12-10T15:36:43,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,632 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:36:43,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-12-10T15:36:43,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-10T15:36:43,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,641 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:36:43,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,642 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 
execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:36:43,642 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:36:43,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T15:36:43,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,656 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 94b551ecd4747174537fcd83980a419f 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-10T15:36:43,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=A 2024-12-10T15:36:43,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:43,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=B 2024-12-10T15:36:43,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:43,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=C 2024-12-10T15:36:43,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:43,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:43,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,668 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412108cdbaf72ed3c4413b8ce6b7e9522978d_94b551ecd4747174537fcd83980a419f is 50, key is test_row_0/A:col10/1733845003654/Put/seqid=0 2024-12-10T15:36:43,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742030_1206 (size=12454) 2024-12-10T15:36:43,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,708 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,718 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412108cdbaf72ed3c4413b8ce6b7e9522978d_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108cdbaf72ed3c4413b8ce6b7e9522978d_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:43,719 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/c0bfb16deee54402b229b5fd71e14a7a, store: [table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:43,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,719 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/c0bfb16deee54402b229b5fd71e14a7a is 175, key is test_row_0/A:col10/1733845003654/Put/seqid=0 
2024-12-10T15:36:43,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-10T15:36:43,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,752 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:43,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845063748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:43,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,756 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:43,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845063749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:43,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742031_1207 (size=31255) 2024-12-10T15:36:43,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:43,795 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:43,796 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-10T15:36:43,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:43,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:43,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:43,799 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:43,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:43,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:43,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:43,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845063853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:43,861 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:43,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845063857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:43,957 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:43,957 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-10T15:36:43,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
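The "Over memstore limit=512.0 K" rejections above come from the region's blocking check: once a region's memstore grows past hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, new Mutate calls are refused with RegionTooBusyException until the pending flush drains it. Below is a minimal sketch of the two properties involved, assuming the stock property names; the 128 KB figure is only an inference from the 512 K limit reported in this run, since the excerpt does not show the test's actual configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Per-region flush threshold (default 128 MB). Low-memory test runs often
    // shrink it so flushes and write blocking kick in quickly; 128 KB is an
    // illustrative value consistent with the 512 K limit seen in this log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);

    // Writes are rejected with RegionTooBusyException once the memstore reaches
    // flush.size * block.multiplier (the default multiplier is 4).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking limit (bytes): " + blockingLimit); // 524288 = 512 K
  }
}
```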
2024-12-10T15:36:43,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:43,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:43,959 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:43,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
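The pid=52 entries above are the master's flush-table procedure repeatedly dispatching FlushRegionCallable to the region server; each attempt fails with "Unable to complete flush" because the region is already mid-flush, so the master simply re-dispatches until the in-progress flush finishes. On the client side such a procedure is normally kicked off with Admin.flush; whether this particular run used the Admin API or an internal test helper is not visible in the excerpt. A minimal sketch, assuming the standard Java client and the table name shown in the log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a flush of every region of the table; on this branch the master
      // drives it as a procedure, dispatching FlushRegionCallable to the hosting
      // region servers and retrying any region that reports it is already flushing.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```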
2024-12-10T15:36:43,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:43,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-10T15:36:44,056 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:44,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845064055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:44,076 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:44,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845064076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:44,119 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:44,119 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-10T15:36:44,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:44,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:44,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:44,119 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
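The Mutate rejections interleaved through this stretch (callId 117, 206, and so on) are the test's writer threads hitting the blocked region; each one surfaces as a RegionTooBusyException, which is a retryable condition rather than a data error. A minimal sketch of handling it with explicit backoff, assuming client-side retries are configured low enough that the exception actually reaches application code; the row, family and qualifier are taken from the log, and the backoff numbers are illustrative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {

      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100; // illustrative starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (RegionTooBusyException e) {
          // The region is over its memstore blocking limit and refuses writes
          // until the in-flight flush completes; back off and try again.
          // After the final attempt this sketch simply gives up silently.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}
```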
2024-12-10T15:36:44,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:44,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:44,158 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=341, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/c0bfb16deee54402b229b5fd71e14a7a 2024-12-10T15:36:44,173 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/256657f0677b43e8b4acc3195889ccb8 is 50, key is test_row_0/B:col10/1733845003654/Put/seqid=0 2024-12-10T15:36:44,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742032_1208 (size=12301) 2024-12-10T15:36:44,186 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=341 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/256657f0677b43e8b4acc3195889ccb8 2024-12-10T15:36:44,195 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/7b30e5bb331f4dfe85229a006ab60cf8 is 50, key is test_row_0/C:col10/1733845003654/Put/seqid=0 2024-12-10T15:36:44,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742033_1209 (size=12301) 2024-12-10T15:36:44,200 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=341 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/7b30e5bb331f4dfe85229a006ab60cf8 2024-12-10T15:36:44,215 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/c0bfb16deee54402b229b5fd71e14a7a as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/c0bfb16deee54402b229b5fd71e14a7a 2024-12-10T15:36:44,220 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/c0bfb16deee54402b229b5fd71e14a7a, entries=150, sequenceid=341, filesize=30.5 K 2024-12-10T15:36:44,221 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/256657f0677b43e8b4acc3195889ccb8 as 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/256657f0677b43e8b4acc3195889ccb8 2024-12-10T15:36:44,226 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/256657f0677b43e8b4acc3195889ccb8, entries=150, sequenceid=341, filesize=12.0 K 2024-12-10T15:36:44,228 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/7b30e5bb331f4dfe85229a006ab60cf8 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/7b30e5bb331f4dfe85229a006ab60cf8 2024-12-10T15:36:44,233 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/7b30e5bb331f4dfe85229a006ab60cf8, entries=150, sequenceid=341, filesize=12.0 K 2024-12-10T15:36:44,235 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 94b551ecd4747174537fcd83980a419f in 579ms, sequenceid=341, compaction requested=true 2024-12-10T15:36:44,235 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:44,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 94b551ecd4747174537fcd83980a419f:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:36:44,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:44,235 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:36:44,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 94b551ecd4747174537fcd83980a419f:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:36:44,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:44,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 94b551ecd4747174537fcd83980a419f:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:36:44,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-10T15:36:44,236 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:36:44,237 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 4 files of size 125702 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:36:44,237 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 94b551ecd4747174537fcd83980a419f/A is initiating minor compaction (all files) 2024-12-10T15:36:44,237 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 94b551ecd4747174537fcd83980a419f/A in TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:44,237 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/7ed1dd66c7954daaa9dc69eaab34716d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/78a1fc7614fe44e694d4daeab4e4dcfa, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/b99fa4f522b2401090a36300183133ec, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/c0bfb16deee54402b229b5fd71e14a7a] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp, totalSize=122.8 K 2024-12-10T15:36:44,237 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:44,237 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
files: [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/7ed1dd66c7954daaa9dc69eaab34716d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/78a1fc7614fe44e694d4daeab4e4dcfa, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/b99fa4f522b2401090a36300183133ec, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/c0bfb16deee54402b229b5fd71e14a7a] 2024-12-10T15:36:44,238 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7ed1dd66c7954daaa9dc69eaab34716d, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733845000128 2024-12-10T15:36:44,238 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:36:44,238 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 94b551ecd4747174537fcd83980a419f/B is initiating minor compaction (all files) 2024-12-10T15:36:44,238 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 94b551ecd4747174537fcd83980a419f/B in TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:44,238 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/f2abc4bd189643329cd81d48654b9616, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/83100efbcc564e03b0e76598dee3e76c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/e06fd3018bb9487c996a00787ff78183, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/256657f0677b43e8b4acc3195889ccb8] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp, totalSize=48.7 K 2024-12-10T15:36:44,239 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 78a1fc7614fe44e694d4daeab4e4dcfa, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1733845001255 2024-12-10T15:36:44,239 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting f2abc4bd189643329cd81d48654b9616, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733845000128 2024-12-10T15:36:44,239 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting b99fa4f522b2401090a36300183133ec, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1733845002560 2024-12-10T15:36:44,239 
DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 83100efbcc564e03b0e76598dee3e76c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1733845001255 2024-12-10T15:36:44,239 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting c0bfb16deee54402b229b5fd71e14a7a, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1733845003024 2024-12-10T15:36:44,239 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting e06fd3018bb9487c996a00787ff78183, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1733845002560 2024-12-10T15:36:44,240 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 256657f0677b43e8b4acc3195889ccb8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1733845003024 2024-12-10T15:36:44,253 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:44,255 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 94b551ecd4747174537fcd83980a419f#B#compaction#169 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:44,255 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/64a71a3e962d4b248fef029f0d32b8a2 is 50, key is test_row_0/B:col10/1733845003654/Put/seqid=0 2024-12-10T15:36:44,263 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210568679feb9154de9a5a358ad83846cf7_94b551ecd4747174537fcd83980a419f store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:44,265 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210568679feb9154de9a5a358ad83846cf7_94b551ecd4747174537fcd83980a419f, store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:44,265 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210568679feb9154de9a5a358ad83846cf7_94b551ecd4747174537fcd83980a419f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:44,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-10T15:36:44,276 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:44,276 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-10T15:36:44,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:44,276 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing 94b551ecd4747174537fcd83980a419f 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-10T15:36:44,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=A 2024-12-10T15:36:44,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:44,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=B 2024-12-10T15:36:44,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:44,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=C 2024-12-10T15:36:44,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:44,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:44,278 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:44,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742034_1210 (size=13119) 2024-12-10T15:36:44,301 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:44,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845064295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:44,303 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:44,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845064300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:44,303 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:44,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845064301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:44,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742035_1211 (size=4469) 2024-12-10T15:36:44,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210abcdf9b245ed4902ac6b049f6941301c_94b551ecd4747174537fcd83980a419f is 50, key is test_row_0/A:col10/1733845003748/Put/seqid=0 2024-12-10T15:36:44,311 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 94b551ecd4747174537fcd83980a419f#A#compaction#168 average throughput is 0.42 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:44,312 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/45cddd40764b417996e9663c9923fdee is 175, key is test_row_0/A:col10/1733845003654/Put/seqid=0 2024-12-10T15:36:44,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742036_1212 (size=12454) 2024-12-10T15:36:44,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742037_1213 (size=32073) 2024-12-10T15:36:44,362 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:44,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845064359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:44,379 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:44,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845064377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:44,406 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:44,406 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:44,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845064404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:44,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845064402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:44,406 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:44,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845064404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:44,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:44,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845064608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:44,610 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:44,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845064608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:44,618 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:44,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845064615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:44,707 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/64a71a3e962d4b248fef029f0d32b8a2 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/64a71a3e962d4b248fef029f0d32b8a2 2024-12-10T15:36:44,713 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 94b551ecd4747174537fcd83980a419f/B of 94b551ecd4747174537fcd83980a419f into 64a71a3e962d4b248fef029f0d32b8a2(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:36:44,713 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:44,713 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f., storeName=94b551ecd4747174537fcd83980a419f/B, priority=12, startTime=1733845004235; duration=0sec 2024-12-10T15:36:44,713 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:44,713 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 94b551ecd4747174537fcd83980a419f:B 2024-12-10T15:36:44,713 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:36:44,716 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:36:44,717 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 94b551ecd4747174537fcd83980a419f/C is initiating minor compaction (all files) 2024-12-10T15:36:44,717 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 94b551ecd4747174537fcd83980a419f/C in TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:44,717 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/f216178dfaf94430a65595f56d6d6447, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/6ce2012b728c47bdaea3295837fefcde, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/c4e35ca8f22f467290be5731fa467f1e, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/7b30e5bb331f4dfe85229a006ab60cf8] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp, totalSize=48.7 K 2024-12-10T15:36:44,717 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting f216178dfaf94430a65595f56d6d6447, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1733845000128 2024-12-10T15:36:44,717 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ce2012b728c47bdaea3295837fefcde, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1733845001255 2024-12-10T15:36:44,718 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting c4e35ca8f22f467290be5731fa467f1e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=327, earliestPutTs=1733845002560 2024-12-10T15:36:44,718 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b30e5bb331f4dfe85229a006ab60cf8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1733845003024 2024-12-10T15:36:44,730 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 94b551ecd4747174537fcd83980a419f#C#compaction#171 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:44,731 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/7a218c879e4f4f0ea68d42a94dd1d48d is 50, key is test_row_0/C:col10/1733845003654/Put/seqid=0 2024-12-10T15:36:44,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:44,736 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210abcdf9b245ed4902ac6b049f6941301c_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210abcdf9b245ed4902ac6b049f6941301c_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:44,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/6cd7fcef9d67487899a60cc73097d268, store: [table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:44,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/6cd7fcef9d67487899a60cc73097d268 is 175, key is test_row_0/A:col10/1733845003748/Put/seqid=0 2024-12-10T15:36:44,744 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/45cddd40764b417996e9663c9923fdee as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/45cddd40764b417996e9663c9923fdee 2024-12-10T15:36:44,757 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 94b551ecd4747174537fcd83980a419f/A of 94b551ecd4747174537fcd83980a419f into 
45cddd40764b417996e9663c9923fdee(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:36:44,757 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:44,757 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f., storeName=94b551ecd4747174537fcd83980a419f/A, priority=12, startTime=1733845004235; duration=0sec 2024-12-10T15:36:44,757 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:44,757 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 94b551ecd4747174537fcd83980a419f:A 2024-12-10T15:36:44,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-10T15:36:44,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742038_1214 (size=31255) 2024-12-10T15:36:44,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742039_1215 (size=13119) 2024-12-10T15:36:44,813 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/7a218c879e4f4f0ea68d42a94dd1d48d as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/7a218c879e4f4f0ea68d42a94dd1d48d 2024-12-10T15:36:44,819 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 94b551ecd4747174537fcd83980a419f/C of 94b551ecd4747174537fcd83980a419f into 7a218c879e4f4f0ea68d42a94dd1d48d(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:36:44,819 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:44,819 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f., storeName=94b551ecd4747174537fcd83980a419f/C, priority=12, startTime=1733845004235; duration=0sec 2024-12-10T15:36:44,819 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:44,819 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 94b551ecd4747174537fcd83980a419f:C 2024-12-10T15:36:44,869 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:44,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845064868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:44,883 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:44,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845064882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:44,914 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:44,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845064912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:44,914 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:44,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845064912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:44,933 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:44,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845064931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:45,171 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=365, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/6cd7fcef9d67487899a60cc73097d268 2024-12-10T15:36:45,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/8616e4f20be04fa7ae0b34134483656f is 50, key is test_row_0/B:col10/1733845003748/Put/seqid=0 2024-12-10T15:36:45,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742040_1216 (size=12301) 2024-12-10T15:36:45,199 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/8616e4f20be04fa7ae0b34134483656f 2024-12-10T15:36:45,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/a91560f14e6d430bac71a3a3afbd40bf is 50, key is test_row_0/C:col10/1733845003748/Put/seqid=0 2024-12-10T15:36:45,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742041_1217 (size=12301) 2024-12-10T15:36:45,219 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/a91560f14e6d430bac71a3a3afbd40bf 2024-12-10T15:36:45,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/6cd7fcef9d67487899a60cc73097d268 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/6cd7fcef9d67487899a60cc73097d268 2024-12-10T15:36:45,231 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/6cd7fcef9d67487899a60cc73097d268, entries=150, sequenceid=365, filesize=30.5 K 2024-12-10T15:36:45,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/8616e4f20be04fa7ae0b34134483656f as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/8616e4f20be04fa7ae0b34134483656f 2024-12-10T15:36:45,241 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/8616e4f20be04fa7ae0b34134483656f, entries=150, sequenceid=365, filesize=12.0 K 2024-12-10T15:36:45,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/a91560f14e6d430bac71a3a3afbd40bf as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/a91560f14e6d430bac71a3a3afbd40bf 2024-12-10T15:36:45,246 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/a91560f14e6d430bac71a3a3afbd40bf, entries=150, sequenceid=365, filesize=12.0 K 2024-12-10T15:36:45,252 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 94b551ecd4747174537fcd83980a419f in 976ms, sequenceid=365, compaction requested=false 2024-12-10T15:36:45,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:45,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
2024-12-10T15:36:45,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-12-10T15:36:45,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-12-10T15:36:45,257 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-12-10T15:36:45,257 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6140 sec 2024-12-10T15:36:45,258 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 1.6250 sec 2024-12-10T15:36:45,420 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 94b551ecd4747174537fcd83980a419f 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-10T15:36:45,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=A 2024-12-10T15:36:45,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:45,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=B 2024-12-10T15:36:45,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:45,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=C 2024-12-10T15:36:45,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:45,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:45,443 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412100d17cfc7ae6343b9b952738a3a59b8f6_94b551ecd4747174537fcd83980a419f is 50, key is test_row_0/A:col10/1733845005418/Put/seqid=0 2024-12-10T15:36:45,473 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:45,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845065469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:45,474 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:45,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845065471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:45,478 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:45,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845065474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:45,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742042_1218 (size=17534) 2024-12-10T15:36:45,482 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:45,488 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412100d17cfc7ae6343b9b952738a3a59b8f6_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412100d17cfc7ae6343b9b952738a3a59b8f6_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:45,489 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/c20dfc59c9db4d22915d4fd5a5e33346, store: [table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:45,490 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/c20dfc59c9db4d22915d4fd5a5e33346 is 175, key is test_row_0/A:col10/1733845005418/Put/seqid=0 2024-12-10T15:36:45,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742043_1219 (size=48639) 2024-12-10T15:36:45,520 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=382, memsize=24.6 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/c20dfc59c9db4d22915d4fd5a5e33346 2024-12-10T15:36:45,538 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/257774cd849e4dccba1e8cbce95517eb is 50, key is test_row_0/B:col10/1733845005418/Put/seqid=0 2024-12-10T15:36:45,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742044_1220 (size=12301) 2024-12-10T15:36:45,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:45,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845065575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:45,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:45,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845065575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:45,588 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:45,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845065583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:45,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-10T15:36:45,773 INFO [Thread-585 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-12-10T15:36:45,774 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:36:45,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-12-10T15:36:45,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-10T15:36:45,776 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:36:45,776 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:36:45,777 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:36:45,781 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:45,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845065781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:45,787 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:45,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845065786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:45,791 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:45,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845065790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:45,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-10T15:36:45,881 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:45,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41936 deadline: 1733845065879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:45,894 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:45,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41958 deadline: 1733845065891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:45,931 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:45,932 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-10T15:36:45,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
2024-12-10T15:36:45,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:45,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:45,932 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:45,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:45,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:45,980 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=382 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/257774cd849e4dccba1e8cbce95517eb 2024-12-10T15:36:46,002 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/cc9d668b88ac40b294947fc2a8f210e8 is 50, key is test_row_0/C:col10/1733845005418/Put/seqid=0 2024-12-10T15:36:46,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742045_1221 (size=12301) 2024-12-10T15:36:46,038 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=382 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/cc9d668b88ac40b294947fc2a8f210e8 2024-12-10T15:36:46,056 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/c20dfc59c9db4d22915d4fd5a5e33346 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/c20dfc59c9db4d22915d4fd5a5e33346 2024-12-10T15:36:46,070 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/c20dfc59c9db4d22915d4fd5a5e33346, entries=250, sequenceid=382, filesize=47.5 K 2024-12-10T15:36:46,079 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/257774cd849e4dccba1e8cbce95517eb as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/257774cd849e4dccba1e8cbce95517eb 2024-12-10T15:36:46,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-10T15:36:46,087 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:46,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845066084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:46,087 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:46,088 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-10T15:36:46,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:46,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:46,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:46,088 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:46,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:46,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:46,092 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/257774cd849e4dccba1e8cbce95517eb, entries=150, sequenceid=382, filesize=12.0 K 2024-12-10T15:36:46,094 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/cc9d668b88ac40b294947fc2a8f210e8 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/cc9d668b88ac40b294947fc2a8f210e8 2024-12-10T15:36:46,105 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:46,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845066093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:46,106 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:46,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845066095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:46,114 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/cc9d668b88ac40b294947fc2a8f210e8, entries=150, sequenceid=382, filesize=12.0 K 2024-12-10T15:36:46,122 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 94b551ecd4747174537fcd83980a419f in 702ms, sequenceid=382, compaction requested=true 2024-12-10T15:36:46,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:46,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 94b551ecd4747174537fcd83980a419f:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:36:46,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:46,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 94b551ecd4747174537fcd83980a419f:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:36:46,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-10T15:36:46,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 94b551ecd4747174537fcd83980a419f:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:36:46,123 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-10T15:36:46,123 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 
compacting, 3 eligible, 16 blocking 2024-12-10T15:36:46,123 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:46,132 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111967 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:46,132 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 94b551ecd4747174537fcd83980a419f/A is initiating minor compaction (all files) 2024-12-10T15:36:46,132 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 94b551ecd4747174537fcd83980a419f/A in TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:46,132 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/45cddd40764b417996e9663c9923fdee, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/6cd7fcef9d67487899a60cc73097d268, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/c20dfc59c9db4d22915d4fd5a5e33346] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp, totalSize=109.3 K 2024-12-10T15:36:46,132 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:46,132 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
files: [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/45cddd40764b417996e9663c9923fdee, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/6cd7fcef9d67487899a60cc73097d268, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/c20dfc59c9db4d22915d4fd5a5e33346] 2024-12-10T15:36:46,135 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45cddd40764b417996e9663c9923fdee, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1733845003024 2024-12-10T15:36:46,135 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:46,135 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 94b551ecd4747174537fcd83980a419f/C is initiating minor compaction (all files) 2024-12-10T15:36:46,135 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 94b551ecd4747174537fcd83980a419f/C in TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:46,135 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/7a218c879e4f4f0ea68d42a94dd1d48d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/a91560f14e6d430bac71a3a3afbd40bf, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/cc9d668b88ac40b294947fc2a8f210e8] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp, totalSize=36.8 K 2024-12-10T15:36:46,140 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6cd7fcef9d67487899a60cc73097d268, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1733845003739 2024-12-10T15:36:46,140 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 7a218c879e4f4f0ea68d42a94dd1d48d, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1733845003024 2024-12-10T15:36:46,143 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting c20dfc59c9db4d22915d4fd5a5e33346, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=382, earliestPutTs=1733845004293 2024-12-10T15:36:46,143 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting a91560f14e6d430bac71a3a3afbd40bf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1733845003739 2024-12-10T15:36:46,152 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 
cc9d668b88ac40b294947fc2a8f210e8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=382, earliestPutTs=1733845004293 2024-12-10T15:36:46,198 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:46,201 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 94b551ecd4747174537fcd83980a419f#C#compaction#178 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:46,201 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/46097179c12c44a780def6360622ba0c is 50, key is test_row_0/C:col10/1733845005418/Put/seqid=0 2024-12-10T15:36:46,212 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412106e27cc108a5d4cec89cb3ab44e1c46fb_94b551ecd4747174537fcd83980a419f store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:46,213 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412106e27cc108a5d4cec89cb3ab44e1c46fb_94b551ecd4747174537fcd83980a419f, store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:46,214 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412106e27cc108a5d4cec89cb3ab44e1c46fb_94b551ecd4747174537fcd83980a419f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:46,243 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:46,244 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-10T15:36:46,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
2024-12-10T15:36:46,244 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing 94b551ecd4747174537fcd83980a419f 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-10T15:36:46,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=A 2024-12-10T15:36:46,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:46,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=B 2024-12-10T15:36:46,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:46,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=C 2024-12-10T15:36:46,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:46,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742047_1223 (size=4469) 2024-12-10T15:36:46,251 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 94b551ecd4747174537fcd83980a419f#A#compaction#177 average throughput is 0.47 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:46,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742046_1222 (size=13221) 2024-12-10T15:36:46,252 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/7a592738161a4c0890772d6203939597 is 175, key is test_row_0/A:col10/1733845005418/Put/seqid=0 2024-12-10T15:36:46,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412105a8e9dcfa1464351970431e7a4d6cedc_94b551ecd4747174537fcd83980a419f is 50, key is test_row_0/A:col10/1733845005469/Put/seqid=0 2024-12-10T15:36:46,267 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/46097179c12c44a780def6360622ba0c as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/46097179c12c44a780def6360622ba0c 2024-12-10T15:36:46,280 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 94b551ecd4747174537fcd83980a419f/C of 94b551ecd4747174537fcd83980a419f into 46097179c12c44a780def6360622ba0c(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:36:46,280 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:46,280 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f., storeName=94b551ecd4747174537fcd83980a419f/C, priority=13, startTime=1733845006123; duration=0sec 2024-12-10T15:36:46,280 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:46,280 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 94b551ecd4747174537fcd83980a419f:C 2024-12-10T15:36:46,282 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:46,283 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:46,283 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 94b551ecd4747174537fcd83980a419f/B is initiating minor compaction (all files) 2024-12-10T15:36:46,283 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 94b551ecd4747174537fcd83980a419f/B in TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:46,284 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/64a71a3e962d4b248fef029f0d32b8a2, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/8616e4f20be04fa7ae0b34134483656f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/257774cd849e4dccba1e8cbce95517eb] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp, totalSize=36.8 K 2024-12-10T15:36:46,284 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 64a71a3e962d4b248fef029f0d32b8a2, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1733845003024 2024-12-10T15:36:46,284 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 8616e4f20be04fa7ae0b34134483656f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=365, earliestPutTs=1733845003739 2024-12-10T15:36:46,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742048_1224 (size=32175) 2024-12-10T15:36:46,287 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 257774cd849e4dccba1e8cbce95517eb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=382, earliestPutTs=1733845004293 2024-12-10T15:36:46,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742049_1225 (size=12454) 2024-12-10T15:36:46,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:46,297 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 94b551ecd4747174537fcd83980a419f#B#compaction#180 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:46,298 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/aa99f9e63e6c4c298994a2ad4c8bf1ce is 50, key is test_row_0/B:col10/1733845005418/Put/seqid=0 2024-12-10T15:36:46,298 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412105a8e9dcfa1464351970431e7a4d6cedc_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412105a8e9dcfa1464351970431e7a4d6cedc_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:46,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/3f2892eadfea43ea92d399f95eadb3ae, store: [table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:46,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/3f2892eadfea43ea92d399f95eadb3ae is 175, key is test_row_0/A:col10/1733845005469/Put/seqid=0 2024-12-10T15:36:46,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742050_1226 (size=13221) 2024-12-10T15:36:46,366 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/aa99f9e63e6c4c298994a2ad4c8bf1ce as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/aa99f9e63e6c4c298994a2ad4c8bf1ce 2024-12-10T15:36:46,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to 
blk_1073742051_1227 (size=31255) 2024-12-10T15:36:46,377 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 94b551ecd4747174537fcd83980a419f/B of 94b551ecd4747174537fcd83980a419f into aa99f9e63e6c4c298994a2ad4c8bf1ce(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:36:46,377 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:46,377 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f., storeName=94b551ecd4747174537fcd83980a419f/B, priority=13, startTime=1733845006123; duration=0sec 2024-12-10T15:36:46,378 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:46,378 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 94b551ecd4747174537fcd83980a419f:B 2024-12-10T15:36:46,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-10T15:36:46,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:46,591 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. as already flushing 2024-12-10T15:36:46,636 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:46,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845066632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:46,637 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:46,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845066633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:46,637 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:46,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845066635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:46,701 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/7a592738161a4c0890772d6203939597 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/7a592738161a4c0890772d6203939597 2024-12-10T15:36:46,724 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 94b551ecd4747174537fcd83980a419f/A of 94b551ecd4747174537fcd83980a419f into 7a592738161a4c0890772d6203939597(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:36:46,724 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:46,724 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f., storeName=94b551ecd4747174537fcd83980a419f/A, priority=13, startTime=1733845006123; duration=0sec 2024-12-10T15:36:46,724 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:46,725 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 94b551ecd4747174537fcd83980a419f:A 2024-12-10T15:36:46,739 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:46,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845066738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:46,739 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:46,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845066738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:46,744 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:46,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845066744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:46,775 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=404, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/3f2892eadfea43ea92d399f95eadb3ae 2024-12-10T15:36:46,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/bb41c9a00d574d938d7916d1660c92f3 is 50, key is test_row_0/B:col10/1733845005469/Put/seqid=0 2024-12-10T15:36:46,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742052_1228 (size=12301) 2024-12-10T15:36:46,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-10T15:36:46,941 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:46,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845066941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:46,944 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:46,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845066944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:46,947 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:46,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845066946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:47,215 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=404 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/bb41c9a00d574d938d7916d1660c92f3 2024-12-10T15:36:47,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/9b4216bdaeeb4df89faebebf6535cd3b is 50, key is test_row_0/C:col10/1733845005469/Put/seqid=0 2024-12-10T15:36:47,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742053_1229 (size=12301) 2024-12-10T15:36:47,245 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=404 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/9b4216bdaeeb4df89faebebf6535cd3b 2024-12-10T15:36:47,245 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:47,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42010 deadline: 1733845067244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:47,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/3f2892eadfea43ea92d399f95eadb3ae as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/3f2892eadfea43ea92d399f95eadb3ae 2024-12-10T15:36:47,249 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:47,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41978 deadline: 1733845067247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:47,252 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:47,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41962 deadline: 1733845067251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:47,253 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/3f2892eadfea43ea92d399f95eadb3ae, entries=150, sequenceid=404, filesize=30.5 K 2024-12-10T15:36:47,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/bb41c9a00d574d938d7916d1660c92f3 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/bb41c9a00d574d938d7916d1660c92f3 2024-12-10T15:36:47,259 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/bb41c9a00d574d938d7916d1660c92f3, entries=150, sequenceid=404, filesize=12.0 K 2024-12-10T15:36:47,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/9b4216bdaeeb4df89faebebf6535cd3b as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/9b4216bdaeeb4df89faebebf6535cd3b 2024-12-10T15:36:47,267 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/9b4216bdaeeb4df89faebebf6535cd3b, entries=150, sequenceid=404, filesize=12.0 K 2024-12-10T15:36:47,272 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 94b551ecd4747174537fcd83980a419f in 1028ms, sequenceid=404, 
compaction requested=false 2024-12-10T15:36:47,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:47,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:47,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-12-10T15:36:47,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-12-10T15:36:47,274 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-12-10T15:36:47,274 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4960 sec 2024-12-10T15:36:47,280 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 1.5010 sec 2024-12-10T15:36:47,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:47,749 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 94b551ecd4747174537fcd83980a419f 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-10T15:36:47,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=A 2024-12-10T15:36:47,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:47,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=B 2024-12-10T15:36:47,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:47,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=C 2024-12-10T15:36:47,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:47,760 DEBUG [Thread-581 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0a4c53ed to 127.0.0.1:56346 2024-12-10T15:36:47,760 DEBUG [Thread-581 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:36:47,760 DEBUG [Thread-586 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x14c16cd4 to 127.0.0.1:56346 2024-12-10T15:36:47,760 DEBUG [Thread-586 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:36:47,762 DEBUG [Thread-588 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0341384e to 127.0.0.1:56346 2024-12-10T15:36:47,762 DEBUG [Thread-588 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:36:47,762 DEBUG [Thread-577 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0e3a4420 to 127.0.0.1:56346 2024-12-10T15:36:47,762 DEBUG [Thread-577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-12-10T15:36:47,763 DEBUG [Thread-579 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x42e904d8 to 127.0.0.1:56346 2024-12-10T15:36:47,763 DEBUG [Thread-579 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:36:47,765 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210300a0119041547c6b4ee28f3499240ec_94b551ecd4747174537fcd83980a419f is 50, key is test_row_0/A:col10/1733845007747/Put/seqid=0 2024-12-10T15:36:47,767 DEBUG [Thread-592 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4c1ec7ee to 127.0.0.1:56346 2024-12-10T15:36:47,767 DEBUG [Thread-592 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:36:47,769 DEBUG [Thread-590 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x26b120d9 to 127.0.0.1:56346 2024-12-10T15:36:47,769 DEBUG [Thread-590 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:36:47,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742054_1230 (size=14994) 2024-12-10T15:36:47,895 DEBUG [Thread-575 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5c820ef9 to 127.0.0.1:56346 2024-12-10T15:36:47,895 DEBUG [Thread-575 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:36:47,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-10T15:36:47,898 INFO [Thread-585 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-12-10T15:36:47,899 DEBUG [Thread-583 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x22e911df to 127.0.0.1:56346 2024-12-10T15:36:47,899 DEBUG [Thread-583 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:36:47,900 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-10T15:36:47,900 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 50 2024-12-10T15:36:47,900 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 53 2024-12-10T15:36:47,900 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 72 2024-12-10T15:36:47,900 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 60 2024-12-10T15:36:47,900 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 103 2024-12-10T15:36:47,900 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-10T15:36:47,900 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3946 2024-12-10T15:36:47,900 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3897 2024-12-10T15:36:47,900 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-10T15:36:47,900 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1623 2024-12-10T15:36:47,900 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4869 rows 2024-12-10T15:36:47,900 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1611 2024-12-10T15:36:47,900 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4833 rows 2024-12-10T15:36:47,900 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-10T15:36:47,900 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x26401a5f to 127.0.0.1:56346 2024-12-10T15:36:47,900 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:36:47,907 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-10T15:36:47,908 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-10T15:36:47,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-10T15:36:47,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-10T15:36:47,912 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733845007912"}]},"ts":"1733845007912"} 2024-12-10T15:36:47,918 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-10T15:36:47,929 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-10T15:36:47,929 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-10T15:36:47,930 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=57, ppid=56, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=94b551ecd4747174537fcd83980a419f, UNASSIGN}] 2024-12-10T15:36:47,931 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=57, ppid=56, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=94b551ecd4747174537fcd83980a419f, UNASSIGN 2024-12-10T15:36:47,931 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=57 updating hbase:meta row=94b551ecd4747174537fcd83980a419f, regionState=CLOSING, regionLocation=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:47,932 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T15:36:47,932 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; CloseRegionProcedure 94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049}] 2024-12-10T15:36:48,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-10T15:36:48,083 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:48,084 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] handler.UnassignRegionHandler(124): Close 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:48,084 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T15:36:48,084 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(1681): Closing 94b551ecd4747174537fcd83980a419f, disabling compactions & flushes 2024-12-10T15:36:48,084 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
2024-12-10T15:36:48,176 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:48,181 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210300a0119041547c6b4ee28f3499240ec_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210300a0119041547c6b4ee28f3499240ec_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:48,184 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/ca00facc2b3642db963de397b03dc773, store: [table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:48,184 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/ca00facc2b3642db963de397b03dc773 is 175, key is test_row_0/A:col10/1733845007747/Put/seqid=0 2024-12-10T15:36:48,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742055_1231 (size=39949) 2024-12-10T15:36:48,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-10T15:36:48,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-10T15:36:48,596 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=422, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/ca00facc2b3642db963de397b03dc773 2024-12-10T15:36:48,617 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/6b67a8289acf4ff18018a95c5eb10779 is 50, key is test_row_0/B:col10/1733845007747/Put/seqid=0 2024-12-10T15:36:48,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742056_1232 (size=12301) 2024-12-10T15:36:48,635 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=422 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/6b67a8289acf4ff18018a95c5eb10779 2024-12-10T15:36:48,645 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/5daf5e9a5d68433ab2ec2b31719d7227 is 50, key is test_row_0/C:col10/1733845007747/Put/seqid=0 2024-12-10T15:36:48,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742057_1233 (size=12301) 2024-12-10T15:36:48,656 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=422 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/5daf5e9a5d68433ab2ec2b31719d7227 2024-12-10T15:36:48,663 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/ca00facc2b3642db963de397b03dc773 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/ca00facc2b3642db963de397b03dc773 2024-12-10T15:36:48,669 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/ca00facc2b3642db963de397b03dc773, entries=200, sequenceid=422, filesize=39.0 K 2024-12-10T15:36:48,670 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/6b67a8289acf4ff18018a95c5eb10779 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/6b67a8289acf4ff18018a95c5eb10779 2024-12-10T15:36:48,678 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/6b67a8289acf4ff18018a95c5eb10779, entries=150, sequenceid=422, filesize=12.0 K 2024-12-10T15:36:48,687 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/5daf5e9a5d68433ab2ec2b31719d7227 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/5daf5e9a5d68433ab2ec2b31719d7227 2024-12-10T15:36:48,694 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/5daf5e9a5d68433ab2ec2b31719d7227, entries=150, sequenceid=422, filesize=12.0 K 2024-12-10T15:36:48,701 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=67.09 KB/68700 for 94b551ecd4747174537fcd83980a419f in 952ms, sequenceid=422, compaction requested=true 2024-12-10T15:36:48,701 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush 
status journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:48,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 94b551ecd4747174537fcd83980a419f:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:36:48,701 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:48,701 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. because compaction request was cancelled 2024-12-10T15:36:48,701 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:48,701 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 94b551ecd4747174537fcd83980a419f:A 2024-12-10T15:36:48,701 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. after waiting 0 ms 2024-12-10T15:36:48,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:48,701 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:48,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 94b551ecd4747174537fcd83980a419f:B, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:36:48,701 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. because compaction request was cancelled 2024-12-10T15:36:48,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:48,701 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 94b551ecd4747174537fcd83980a419f:B 2024-12-10T15:36:48,701 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(2837): Flushing 94b551ecd4747174537fcd83980a419f 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-10T15:36:48,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 94b551ecd4747174537fcd83980a419f:C, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:36:48,701 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 
because compaction request was cancelled 2024-12-10T15:36:48,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:48,701 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 94b551ecd4747174537fcd83980a419f:C 2024-12-10T15:36:48,701 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=A 2024-12-10T15:36:48,701 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:48,702 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=B 2024-12-10T15:36:48,702 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:48,702 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 94b551ecd4747174537fcd83980a419f, store=C 2024-12-10T15:36:48,702 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:48,721 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210dc17a00fa047493ca46fe5e3b8e4717a_94b551ecd4747174537fcd83980a419f is 50, key is test_row_0/A:col10/1733845007761/Put/seqid=0 2024-12-10T15:36:48,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742058_1234 (size=12454) 2024-12-10T15:36:48,749 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:48,764 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210dc17a00fa047493ca46fe5e3b8e4717a_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210dc17a00fa047493ca46fe5e3b8e4717a_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:48,771 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/ece06b787b6a4a36af6de0541fd7ab04, store: [table=TestAcidGuarantees family=A region=94b551ecd4747174537fcd83980a419f] 2024-12-10T15:36:48,771 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/ece06b787b6a4a36af6de0541fd7ab04 is 175, key is test_row_0/A:col10/1733845007761/Put/seqid=0 2024-12-10T15:36:48,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742059_1235 (size=31255) 2024-12-10T15:36:49,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-10T15:36:49,218 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=435, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/ece06b787b6a4a36af6de0541fd7ab04 2024-12-10T15:36:49,229 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/151b2a683792421e83814c30d56a1417 is 50, key is test_row_0/B:col10/1733845007761/Put/seqid=0 2024-12-10T15:36:49,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742060_1236 (size=12301) 2024-12-10T15:36:49,672 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/151b2a683792421e83814c30d56a1417 2024-12-10T15:36:49,689 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/b86bf042dde845cbb4cf88e5e24a6927 is 50, key is test_row_0/C:col10/1733845007761/Put/seqid=0 2024-12-10T15:36:49,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742061_1237 (size=12301) 2024-12-10T15:36:50,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-10T15:36:50,105 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=435 (bloomFilter=true), 
to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/b86bf042dde845cbb4cf88e5e24a6927 2024-12-10T15:36:50,110 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/A/ece06b787b6a4a36af6de0541fd7ab04 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/ece06b787b6a4a36af6de0541fd7ab04 2024-12-10T15:36:50,116 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/ece06b787b6a4a36af6de0541fd7ab04, entries=150, sequenceid=435, filesize=30.5 K 2024-12-10T15:36:50,117 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/B/151b2a683792421e83814c30d56a1417 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/151b2a683792421e83814c30d56a1417 2024-12-10T15:36:50,123 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/151b2a683792421e83814c30d56a1417, entries=150, sequenceid=435, filesize=12.0 K 2024-12-10T15:36:50,127 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/.tmp/C/b86bf042dde845cbb4cf88e5e24a6927 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/b86bf042dde845cbb4cf88e5e24a6927 2024-12-10T15:36:50,139 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/b86bf042dde845cbb4cf88e5e24a6927, entries=150, sequenceid=435, filesize=12.0 K 2024-12-10T15:36:50,140 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=0 B/0 for 94b551ecd4747174537fcd83980a419f in 1439ms, sequenceid=435, compaction requested=true 2024-12-10T15:36:50,141 DEBUG [StoreCloser-TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/92ba4dadfa784e508dabcd24966580b5, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/015ca286e51d4659b9a33d836de72cda, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/85929a8524264170982a1d714762312d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/7d70817a2bc74d53b67ccce5cf5f22e3, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/df5f54dbc99045f38221f6faf544ffcf, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/399128fef0c449a9b56c6dd152f93218, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/d3f1bf63c20047c9a5192463cd610254, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/7c703f442e60411ca597f489b21b6ad6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/83ea3f03ed964ac6b1ee5d063ba4f4ac, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/84c26c7ddea84bce9b0168618d852ad6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/fc9dd20615df43b49e08a03bf06c2c78, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/5e93c3ce3f8c451dbd9a452149e6b693, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/f38e9961fd5e4bfba0529c43d9c06572, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/0857035d820d48f6907d3a080ddbd4ba, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/febfde6d8f384c1d95fff4b41dddf8e1, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/7af34ba65dd24a2d94270e11430d2a50, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/1c1143ac18bc4467a55a1037a7183a14, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/05965da93cbd49a6b80d3c910b753cd5, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/80d4551888934e94a9193ea105d2d00e, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/adc141272ff74311aa31536b6dbf1322, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/7ed1dd66c7954daaa9dc69eaab34716d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/78a1fc7614fe44e694d4daeab4e4dcfa, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/b99fa4f522b2401090a36300183133ec, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/45cddd40764b417996e9663c9923fdee, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/c0bfb16deee54402b229b5fd71e14a7a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/6cd7fcef9d67487899a60cc73097d268, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/c20dfc59c9db4d22915d4fd5a5e33346] to archive 2024-12-10T15:36:50,143 DEBUG [StoreCloser-TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-10T15:36:50,148 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/015ca286e51d4659b9a33d836de72cda to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/015ca286e51d4659b9a33d836de72cda 2024-12-10T15:36:50,148 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/df5f54dbc99045f38221f6faf544ffcf to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/df5f54dbc99045f38221f6faf544ffcf 2024-12-10T15:36:50,148 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/d3f1bf63c20047c9a5192463cd610254 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/d3f1bf63c20047c9a5192463cd610254 2024-12-10T15:36:50,148 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/85929a8524264170982a1d714762312d to 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/85929a8524264170982a1d714762312d 2024-12-10T15:36:50,148 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/7c703f442e60411ca597f489b21b6ad6 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/7c703f442e60411ca597f489b21b6ad6 2024-12-10T15:36:50,148 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/399128fef0c449a9b56c6dd152f93218 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/399128fef0c449a9b56c6dd152f93218 2024-12-10T15:36:50,149 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/92ba4dadfa784e508dabcd24966580b5 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/92ba4dadfa784e508dabcd24966580b5 2024-12-10T15:36:50,150 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/7d70817a2bc74d53b67ccce5cf5f22e3 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/7d70817a2bc74d53b67ccce5cf5f22e3 2024-12-10T15:36:50,153 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/f38e9961fd5e4bfba0529c43d9c06572 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/f38e9961fd5e4bfba0529c43d9c06572 2024-12-10T15:36:50,154 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/5e93c3ce3f8c451dbd9a452149e6b693 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/5e93c3ce3f8c451dbd9a452149e6b693 2024-12-10T15:36:50,154 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/84c26c7ddea84bce9b0168618d852ad6 to 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/84c26c7ddea84bce9b0168618d852ad6 2024-12-10T15:36:50,154 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/febfde6d8f384c1d95fff4b41dddf8e1 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/febfde6d8f384c1d95fff4b41dddf8e1 2024-12-10T15:36:50,154 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/0857035d820d48f6907d3a080ddbd4ba to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/0857035d820d48f6907d3a080ddbd4ba 2024-12-10T15:36:50,154 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/fc9dd20615df43b49e08a03bf06c2c78 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/fc9dd20615df43b49e08a03bf06c2c78 2024-12-10T15:36:50,154 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/83ea3f03ed964ac6b1ee5d063ba4f4ac to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/83ea3f03ed964ac6b1ee5d063ba4f4ac 2024-12-10T15:36:50,155 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/7af34ba65dd24a2d94270e11430d2a50 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/7af34ba65dd24a2d94270e11430d2a50 2024-12-10T15:36:50,158 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/7ed1dd66c7954daaa9dc69eaab34716d to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/7ed1dd66c7954daaa9dc69eaab34716d 2024-12-10T15:36:50,158 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/80d4551888934e94a9193ea105d2d00e to 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/80d4551888934e94a9193ea105d2d00e 2024-12-10T15:36:50,159 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/adc141272ff74311aa31536b6dbf1322 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/adc141272ff74311aa31536b6dbf1322 2024-12-10T15:36:50,159 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/78a1fc7614fe44e694d4daeab4e4dcfa to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/78a1fc7614fe44e694d4daeab4e4dcfa 2024-12-10T15:36:50,160 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/45cddd40764b417996e9663c9923fdee to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/45cddd40764b417996e9663c9923fdee 2024-12-10T15:36:50,160 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/1c1143ac18bc4467a55a1037a7183a14 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/1c1143ac18bc4467a55a1037a7183a14 2024-12-10T15:36:50,160 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/05965da93cbd49a6b80d3c910b753cd5 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/05965da93cbd49a6b80d3c910b753cd5 2024-12-10T15:36:50,160 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/b99fa4f522b2401090a36300183133ec to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/b99fa4f522b2401090a36300183133ec 2024-12-10T15:36:50,161 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/c20dfc59c9db4d22915d4fd5a5e33346 to 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/c20dfc59c9db4d22915d4fd5a5e33346 2024-12-10T15:36:50,161 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/6cd7fcef9d67487899a60cc73097d268 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/6cd7fcef9d67487899a60cc73097d268 2024-12-10T15:36:50,162 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/c0bfb16deee54402b229b5fd71e14a7a to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/c0bfb16deee54402b229b5fd71e14a7a 2024-12-10T15:36:50,174 DEBUG [StoreCloser-TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/7344dfe10a8d40e5947d1936abe1c182, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/0b64b6ad54684b8abad7cf1ff30163ca, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/a3fc0d5a01af4deba6249a2256e68116, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/43e9d81679ee41279783b1a6ec3adeee, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/63c5de94a15941f984ed3c63979e5367, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/988138fbae0a4f9ba1e9ac9fe4ff49be, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/8823edb95c054d91acd31a1932d67748, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/0123da49372142c1ab83b33fd53994c6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/88667b6cb33540d7b976f21e690d22f7, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/eec13a2bfb7e47edb007ad53b97e7795, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/567e877889994ac48d0d985f043cecf9, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/296398fbc52a4f588a649430a6d151fe, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/412268f21ba941859580c20d035b0805, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/0e9f7c6d0f86476098f0b07884a17f88, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/71072896258d4d29bf0831a3215d0e21, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/ed89e24aee334ad99e8f3a1fb55a6b2f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/43d51662a8f4449cb2d539a08b77a865, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/86061e04355f4826b16b85fe0dacbb72, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/aa2f323696534a67b8f55172cefd4980, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/f2abc4bd189643329cd81d48654b9616, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/04d3543e489b44c5983265a877425509, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/83100efbcc564e03b0e76598dee3e76c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/e06fd3018bb9487c996a00787ff78183, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/64a71a3e962d4b248fef029f0d32b8a2, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/256657f0677b43e8b4acc3195889ccb8, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/8616e4f20be04fa7ae0b34134483656f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/257774cd849e4dccba1e8cbce95517eb] to archive 2024-12-10T15:36:50,180 DEBUG [StoreCloser-TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
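The HFileArchiver records above and below move each compacted store file from the region's data directory to the mirrored path under archive/ (same table, region, family and file name). As a minimal, illustrative sketch only, assuming plain Hadoop FileSystem access to the test cluster shown in these log entries (the class name ListArchivedStoreFiles is hypothetical and not part of the test), the archived B-family files could be listed like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchivedStoreFiles {
  public static void main(String[] args) throws Exception {
    // Archive path mirrors the data path seen in the log entries above.
    Path archiveDir = new Path(
        "hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/"
            + "archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B");
    FileSystem fs = archiveDir.getFileSystem(new Configuration());
    for (FileStatus status : fs.listStatus(archiveDir)) {
      // Each entry keeps the name it had under data/, e.g. 7344dfe10a8d40e5947d1936abe1c182.
      System.out.println(status.getPath().getName() + "\t" + status.getLen() + " bytes");
    }
  }
}
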
2024-12-10T15:36:50,184 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/7344dfe10a8d40e5947d1936abe1c182 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/7344dfe10a8d40e5947d1936abe1c182 2024-12-10T15:36:50,184 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/a3fc0d5a01af4deba6249a2256e68116 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/a3fc0d5a01af4deba6249a2256e68116 2024-12-10T15:36:50,189 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/43e9d81679ee41279783b1a6ec3adeee to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/43e9d81679ee41279783b1a6ec3adeee 2024-12-10T15:36:50,189 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/63c5de94a15941f984ed3c63979e5367 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/63c5de94a15941f984ed3c63979e5367 2024-12-10T15:36:50,190 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/0b64b6ad54684b8abad7cf1ff30163ca to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/0b64b6ad54684b8abad7cf1ff30163ca 2024-12-10T15:36:50,190 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/988138fbae0a4f9ba1e9ac9fe4ff49be to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/988138fbae0a4f9ba1e9ac9fe4ff49be 2024-12-10T15:36:50,190 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/8823edb95c054d91acd31a1932d67748 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/8823edb95c054d91acd31a1932d67748 2024-12-10T15:36:50,191 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/0123da49372142c1ab83b33fd53994c6 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/0123da49372142c1ab83b33fd53994c6 2024-12-10T15:36:50,203 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/296398fbc52a4f588a649430a6d151fe to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/296398fbc52a4f588a649430a6d151fe 2024-12-10T15:36:50,204 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/eec13a2bfb7e47edb007ad53b97e7795 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/eec13a2bfb7e47edb007ad53b97e7795 2024-12-10T15:36:50,204 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/0e9f7c6d0f86476098f0b07884a17f88 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/0e9f7c6d0f86476098f0b07884a17f88 2024-12-10T15:36:50,204 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/ed89e24aee334ad99e8f3a1fb55a6b2f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/ed89e24aee334ad99e8f3a1fb55a6b2f 2024-12-10T15:36:50,204 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/71072896258d4d29bf0831a3215d0e21 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/71072896258d4d29bf0831a3215d0e21 2024-12-10T15:36:50,204 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/567e877889994ac48d0d985f043cecf9 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/567e877889994ac48d0d985f043cecf9 2024-12-10T15:36:50,204 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/412268f21ba941859580c20d035b0805 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/412268f21ba941859580c20d035b0805 2024-12-10T15:36:50,204 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/88667b6cb33540d7b976f21e690d22f7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/88667b6cb33540d7b976f21e690d22f7 2024-12-10T15:36:50,206 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/aa2f323696534a67b8f55172cefd4980 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/aa2f323696534a67b8f55172cefd4980 2024-12-10T15:36:50,206 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/86061e04355f4826b16b85fe0dacbb72 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/86061e04355f4826b16b85fe0dacbb72 2024-12-10T15:36:50,207 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/83100efbcc564e03b0e76598dee3e76c to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/83100efbcc564e03b0e76598dee3e76c 2024-12-10T15:36:50,207 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/f2abc4bd189643329cd81d48654b9616 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/f2abc4bd189643329cd81d48654b9616 2024-12-10T15:36:50,208 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/43d51662a8f4449cb2d539a08b77a865 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/43d51662a8f4449cb2d539a08b77a865 2024-12-10T15:36:50,208 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/e06fd3018bb9487c996a00787ff78183 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/e06fd3018bb9487c996a00787ff78183 2024-12-10T15:36:50,208 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/04d3543e489b44c5983265a877425509 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/04d3543e489b44c5983265a877425509 2024-12-10T15:36:50,208 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/64a71a3e962d4b248fef029f0d32b8a2 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/64a71a3e962d4b248fef029f0d32b8a2 2024-12-10T15:36:50,209 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/256657f0677b43e8b4acc3195889ccb8 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/256657f0677b43e8b4acc3195889ccb8 2024-12-10T15:36:50,209 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/8616e4f20be04fa7ae0b34134483656f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/8616e4f20be04fa7ae0b34134483656f 2024-12-10T15:36:50,209 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/257774cd849e4dccba1e8cbce95517eb to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/257774cd849e4dccba1e8cbce95517eb 2024-12-10T15:36:50,213 DEBUG [StoreCloser-TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/90192224fdc149eca30fb8798fa29613, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/f62257d602eb4641a9c2e1d5030e6be9, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/90ffc0c60e29491ca94194b50faa8bd5, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/aeb9ea2f9f1c4b20917937bc70c0f776, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/9bbbe77e3f184447b129c86b5067f41b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/fc0b6d60fcdd453f8ac983889809ff28, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/1e003d2695944244b42eba4f66c8ba44, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/2e1d1765f0ed41569a1d26d2afff4d5d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/6e1fcd987c3b46d39da9fb3e93d69b0f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/5aec35dba6544850b171f486ab876a4a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/243dee43af58445790bb09707ce7e36d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/caeb8d9a5fda4c11b769ccd554a487d2, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/2517c7e4d61c4967a6069a985a1c49d0, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/94eb0e5bfb1447aa8151e199c996081d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/aec8bec58ec542f1a2b5476dc08f7a87, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/89f51b9d0944452eb1f8935f072fc027, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/381a85df9cef41dab9fc6ff7a7de5289, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/13ceb6adee3c4b8ab5a7bcf2598d601a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/707ad748debd4aa58011df173fb3ce76, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/f216178dfaf94430a65595f56d6d6447, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/dafcb760e05a432093d19fef30f97f73, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/6ce2012b728c47bdaea3295837fefcde, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/c4e35ca8f22f467290be5731fa467f1e, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/7a218c879e4f4f0ea68d42a94dd1d48d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/7b30e5bb331f4dfe85229a006ab60cf8, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/a91560f14e6d430bac71a3a3afbd40bf, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/cc9d668b88ac40b294947fc2a8f210e8] to archive 2024-12-10T15:36:50,214 DEBUG [StoreCloser-TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-10T15:36:50,219 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/90192224fdc149eca30fb8798fa29613 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/90192224fdc149eca30fb8798fa29613 2024-12-10T15:36:50,220 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/90ffc0c60e29491ca94194b50faa8bd5 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/90ffc0c60e29491ca94194b50faa8bd5 2024-12-10T15:36:50,220 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/f62257d602eb4641a9c2e1d5030e6be9 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/f62257d602eb4641a9c2e1d5030e6be9 2024-12-10T15:36:50,222 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/fc0b6d60fcdd453f8ac983889809ff28 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/fc0b6d60fcdd453f8ac983889809ff28 2024-12-10T15:36:50,222 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/5aec35dba6544850b171f486ab876a4a to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/5aec35dba6544850b171f486ab876a4a 
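The remainder of this close sequence, shown below, archives the C-family files, writes recovered.edits/438.seqid, finishes the DisableTableProcedure for TestAcidGuarantees (pid=55), and then records the client's delete request (pid=59). For reference, a minimal client-side sketch of that disable-then-delete flow using the standard HBase Admin API (the class DisableAndDeleteTable is hypothetical and not the test's own code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableAndDeleteTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Disable first; HBase refuses to delete a table that is still enabled.
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);   // corresponds to the DisableTableProcedure logged above (pid=55)
      }
      admin.deleteTable(table);      // corresponds to the DeleteTableProcedure request logged below (pid=59)
    }
  }
}
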
2024-12-10T15:36:50,223 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/243dee43af58445790bb09707ce7e36d to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/243dee43af58445790bb09707ce7e36d 2024-12-10T15:36:50,223 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/aeb9ea2f9f1c4b20917937bc70c0f776 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/aeb9ea2f9f1c4b20917937bc70c0f776 2024-12-10T15:36:50,223 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/9bbbe77e3f184447b129c86b5067f41b to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/9bbbe77e3f184447b129c86b5067f41b 2024-12-10T15:36:50,223 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/2e1d1765f0ed41569a1d26d2afff4d5d to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/2e1d1765f0ed41569a1d26d2afff4d5d 2024-12-10T15:36:50,223 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/6e1fcd987c3b46d39da9fb3e93d69b0f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/6e1fcd987c3b46d39da9fb3e93d69b0f 2024-12-10T15:36:50,224 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/1e003d2695944244b42eba4f66c8ba44 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/1e003d2695944244b42eba4f66c8ba44 2024-12-10T15:36:50,227 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/2517c7e4d61c4967a6069a985a1c49d0 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/2517c7e4d61c4967a6069a985a1c49d0 2024-12-10T15:36:50,227 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/caeb8d9a5fda4c11b769ccd554a487d2 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/caeb8d9a5fda4c11b769ccd554a487d2 2024-12-10T15:36:50,227 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/94eb0e5bfb1447aa8151e199c996081d to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/94eb0e5bfb1447aa8151e199c996081d 2024-12-10T15:36:50,227 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/381a85df9cef41dab9fc6ff7a7de5289 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/381a85df9cef41dab9fc6ff7a7de5289 2024-12-10T15:36:50,227 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/89f51b9d0944452eb1f8935f072fc027 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/89f51b9d0944452eb1f8935f072fc027 2024-12-10T15:36:50,228 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/aec8bec58ec542f1a2b5476dc08f7a87 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/aec8bec58ec542f1a2b5476dc08f7a87 2024-12-10T15:36:50,228 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/707ad748debd4aa58011df173fb3ce76 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/707ad748debd4aa58011df173fb3ce76 2024-12-10T15:36:50,228 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/13ceb6adee3c4b8ab5a7bcf2598d601a to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/13ceb6adee3c4b8ab5a7bcf2598d601a 2024-12-10T15:36:50,230 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/dafcb760e05a432093d19fef30f97f73 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/dafcb760e05a432093d19fef30f97f73 2024-12-10T15:36:50,232 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/f216178dfaf94430a65595f56d6d6447 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/f216178dfaf94430a65595f56d6d6447 2024-12-10T15:36:50,233 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/6ce2012b728c47bdaea3295837fefcde to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/6ce2012b728c47bdaea3295837fefcde 2024-12-10T15:36:50,233 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/c4e35ca8f22f467290be5731fa467f1e to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/c4e35ca8f22f467290be5731fa467f1e 2024-12-10T15:36:50,233 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/7a218c879e4f4f0ea68d42a94dd1d48d to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/7a218c879e4f4f0ea68d42a94dd1d48d 2024-12-10T15:36:50,233 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/7b30e5bb331f4dfe85229a006ab60cf8 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/7b30e5bb331f4dfe85229a006ab60cf8 2024-12-10T15:36:50,233 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/a91560f14e6d430bac71a3a3afbd40bf to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/a91560f14e6d430bac71a3a3afbd40bf 2024-12-10T15:36:50,233 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/cc9d668b88ac40b294947fc2a8f210e8 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/cc9d668b88ac40b294947fc2a8f210e8 2024-12-10T15:36:50,243 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/recovered.edits/438.seqid, newMaxSeqId=438, maxSeqId=4 2024-12-10T15:36:50,244 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f. 2024-12-10T15:36:50,245 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] regionserver.HRegion(1635): Region close journal for 94b551ecd4747174537fcd83980a419f: 2024-12-10T15:36:50,245 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-10T15:36:50,246 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=58}] handler.UnassignRegionHandler(170): Closed 94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:50,246 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=57 updating hbase:meta row=94b551ecd4747174537fcd83980a419f, regionState=CLOSED 2024-12-10T15:36:50,248 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-12-10T15:36:50,248 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; CloseRegionProcedure 94b551ecd4747174537fcd83980a419f, server=bf0fec90ff6d,46239,1733844953049 in 2.3150 sec 2024-12-10T15:36:50,249 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=57, resume processing ppid=56 2024-12-10T15:36:50,249 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, ppid=56, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=94b551ecd4747174537fcd83980a419f, UNASSIGN in 2.3180 sec 2024-12-10T15:36:50,250 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-12-10T15:36:50,250 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.3200 sec 2024-12-10T15:36:50,251 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733845010251"}]},"ts":"1733845010251"} 2024-12-10T15:36:50,255 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-10T15:36:50,296 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-10T15:36:50,297 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.3880 sec 2024-12-10T15:36:52,016 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-10T15:36:52,017 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-12-10T15:36:52,017 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-10T15:36:52,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:36:52,021 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=59, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:36:52,022 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=59, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:36:52,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-10T15:36:52,027 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:52,031 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A, FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B, FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C, FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/recovered.edits] 2024-12-10T15:36:52,036 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/3f2892eadfea43ea92d399f95eadb3ae to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/3f2892eadfea43ea92d399f95eadb3ae 2024-12-10T15:36:52,036 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/7a592738161a4c0890772d6203939597 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/7a592738161a4c0890772d6203939597 2024-12-10T15:36:52,036 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/ece06b787b6a4a36af6de0541fd7ab04 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/ece06b787b6a4a36af6de0541fd7ab04 2024-12-10T15:36:52,037 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/ca00facc2b3642db963de397b03dc773 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/A/ca00facc2b3642db963de397b03dc773 2024-12-10T15:36:52,043 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/151b2a683792421e83814c30d56a1417 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/151b2a683792421e83814c30d56a1417 2024-12-10T15:36:52,044 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/aa99f9e63e6c4c298994a2ad4c8bf1ce to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/aa99f9e63e6c4c298994a2ad4c8bf1ce 2024-12-10T15:36:52,044 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/bb41c9a00d574d938d7916d1660c92f3 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/bb41c9a00d574d938d7916d1660c92f3 2024-12-10T15:36:52,045 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/6b67a8289acf4ff18018a95c5eb10779 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/B/6b67a8289acf4ff18018a95c5eb10779 2024-12-10T15:36:52,063 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/5daf5e9a5d68433ab2ec2b31719d7227 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/5daf5e9a5d68433ab2ec2b31719d7227 2024-12-10T15:36:52,063 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/46097179c12c44a780def6360622ba0c to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/46097179c12c44a780def6360622ba0c 2024-12-10T15:36:52,064 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/9b4216bdaeeb4df89faebebf6535cd3b to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/9b4216bdaeeb4df89faebebf6535cd3b 2024-12-10T15:36:52,065 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/b86bf042dde845cbb4cf88e5e24a6927 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/C/b86bf042dde845cbb4cf88e5e24a6927 2024-12-10T15:36:52,069 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/recovered.edits/438.seqid to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f/recovered.edits/438.seqid 2024-12-10T15:36:52,070 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:52,070 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-10T15:36:52,071 DEBUG [PEWorker-4 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-10T15:36:52,072 DEBUG [PEWorker-4 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-10T15:36:52,095 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412100d17cfc7ae6343b9b952738a3a59b8f6_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412100d17cfc7ae6343b9b952738a3a59b8f6_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:52,095 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210013b2867a05f4d2cb32eaa1235b737fa_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210013b2867a05f4d2cb32eaa1235b737fa_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:52,098 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210305aee5fa6c54160b51824c8c9b677a4_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210305aee5fa6c54160b51824c8c9b677a4_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:52,098 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210300a0119041547c6b4ee28f3499240ec_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210300a0119041547c6b4ee28f3499240ec_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:52,098 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412102765691ee3c84fcc859b57715c08cb31_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412102765691ee3c84fcc859b57715c08cb31_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:52,098 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412101fa9eb6c0d3f42259140e8ffff4e2dab_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412101fa9eb6c0d3f42259140e8ffff4e2dab_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:52,098 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412105a8e9dcfa1464351970431e7a4d6cedc_94b551ecd4747174537fcd83980a419f to 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412105a8e9dcfa1464351970431e7a4d6cedc_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:52,098 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412104862441ee0c64ef19b7627dbb3f67fd8_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412104862441ee0c64ef19b7627dbb3f67fd8_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:52,104 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108270aa97c2184bcc89f328c90422ce72_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108270aa97c2184bcc89f328c90422ce72_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:52,104 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210717b451fe68b447abdf82b826f33ceb0_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210717b451fe68b447abdf82b826f33ceb0_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:52,105 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108bfe4883e9d54aeba73652ff9d7a1da3_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108bfe4883e9d54aeba73652ff9d7a1da3_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:52,105 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412109292ad780c994a56a1cb0bca89c0097a_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412109292ad780c994a56a1cb0bca89c0097a_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:52,105 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210957406d25c344e028bb518aac838788d_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210957406d25c344e028bb518aac838788d_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:52,105 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210944834097b9b46c3ab23226a0aa79193_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210944834097b9b46c3ab23226a0aa79193_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:52,106 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108cdbaf72ed3c4413b8ce6b7e9522978d_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108cdbaf72ed3c4413b8ce6b7e9522978d_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:52,108 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121086efa0e9c30343179c5ca31bea122a49_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121086efa0e9c30343179c5ca31bea122a49_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:52,108 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210abcdf9b245ed4902ac6b049f6941301c_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210abcdf9b245ed4902ac6b049f6941301c_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:52,108 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210b610132769354b0c86866c010bb6d5be_94b551ecd4747174537fcd83980a419f to 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210b610132769354b0c86866c010bb6d5be_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:52,109 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210f1fffa68adb242e79446d189c585316f_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210f1fffa68adb242e79446d189c585316f_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:52,109 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210b20dd74c5287424cbd9da769719df6d0_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210b20dd74c5287424cbd9da769719df6d0_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:52,109 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210c419d22795e547af9efd63704f81e0f1_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210c419d22795e547af9efd63704f81e0f1_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:52,109 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210dc17a00fa047493ca46fe5e3b8e4717a_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210dc17a00fa047493ca46fe5e3b8e4717a_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:52,110 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210fec977c77189423ea7fe0bbdaeac728b_94b551ecd4747174537fcd83980a419f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210fec977c77189423ea7fe0bbdaeac728b_94b551ecd4747174537fcd83980a419f 2024-12-10T15:36:52,117 DEBUG [PEWorker-4 {}] backup.HFileArchiver(634): Deleted 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-10T15:36:52,122 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=59, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:36:52,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-10T15:36:52,138 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-10T15:36:52,159 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-10T15:36:52,161 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=59, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:36:52,161 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-10T15:36:52,161 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733845012161"}]},"ts":"9223372036854775807"} 2024-12-10T15:36:52,167 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-10T15:36:52,167 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 94b551ecd4747174537fcd83980a419f, NAME => 'TestAcidGuarantees,,1733844984413.94b551ecd4747174537fcd83980a419f.', STARTKEY => '', ENDKEY => ''}] 2024-12-10T15:36:52,167 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 
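For context, the DISABLE and DELETE operations whose server-side procedures produce the archiving and meta cleanup logged above are driven from the test client through the HBase Admin API. A minimal sketch of that client-side sequence (HBase 2.x API; connection setup is assumed, the table name is taken from the log), not the test's own code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableAndDeleteTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          // Blocks until the server-side DisableTableProcedure (pid=55 above) completes.
          admin.disableTable(table);
          // Triggers the DeleteTableProcedure (pid=59 above): store files and MOB files are
          // moved under the archive directory and the table's rows are removed from hbase:meta.
          admin.deleteTable(table);
        }
      }
    }
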
2024-12-10T15:36:52,169 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733845012167"}]},"ts":"9223372036854775807"} 2024-12-10T15:36:52,180 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-10T15:36:52,203 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=59, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:36:52,205 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 186 msec 2024-12-10T15:36:52,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-10T15:36:52,327 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-12-10T15:36:52,340 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=248 (was 246) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/cluster_17d56c0b-d68d-6c1b-ce5b-c524a0c95074/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_359415945_22 at /127.0.0.1:33040 [Waiting for operation #490] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: hconnection-0xddc02ee-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-65916801_22 at /127.0.0.1:57682 [Waiting for operation #687] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/cluster_17d56c0b-d68d-6c1b-ce5b-c524a0c95074/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xddc02ee-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xddc02ee-shared-pool-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-65916801_22 at /127.0.0.1:43344 [Waiting for operation #584] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xddc02ee-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_359415945_22 at /127.0.0.1:33056 [Waiting for operation #482] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=468 (was 459) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1191 (was 1112) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=1944 (was 1232) - AvailableMemoryMB LEAK? - 2024-12-10T15:36:52,357 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=248, OpenFileDescriptor=470, MaxFileDescriptor=1048576, SystemLoadAverage=1191, ProcessCount=11, AvailableMemoryMB=1942 2024-12-10T15:36:52,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-10T15:36:52,358 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T15:36:52,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=60, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-10T15:36:52,360 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=60, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T15:36:52,360 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:52,360 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 
{}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 60 2024-12-10T15:36:52,361 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=60, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T15:36:52,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=60 2024-12-10T15:36:52,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742062_1238 (size=963) 2024-12-10T15:36:52,416 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-10T15:36:52,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=60 2024-12-10T15:36:52,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=60 2024-12-10T15:36:52,807 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935 2024-12-10T15:36:52,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742063_1239 (size=53) 2024-12-10T15:36:52,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=60 2024-12-10T15:36:53,213 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T15:36:53,213 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 
614727b67ed1c48d9acfd143d8b127a7, disabling compactions & flushes 2024-12-10T15:36:53,213 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:36:53,213 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:36:53,213 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. after waiting 0 ms 2024-12-10T15:36:53,213 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:36:53,213 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:36:53,213 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:36:53,214 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=60, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T15:36:53,214 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733845013214"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733845013214"}]},"ts":"1733845013214"} 2024-12-10T15:36:53,215 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
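The CREATE request above prints the table descriptor in shell notation. Built programmatically, an equivalent descriptor (ADAPTIVE in-memory compaction via the table-level metadata key shown in the log, plus families A, B and C with a single version each) would look roughly like the following sketch; it is an illustration against the HBase 2.x client API, not the code the test itself uses:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAdaptiveTable {
      public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // table-level metadata key from the TABLE_ATTRIBUTES block in the log
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
          for (String family : new String[] { "A", "B", "C" }) {
            table.setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
                .build());
          }
          // Drives a CreateTableProcedure like pid=60 above.
          admin.createTable(table.build());
        }
      }
    }

Per-family control of the same setting is also available through ColumnFamilyDescriptorBuilder.setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE).
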
2024-12-10T15:36:53,215 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=60, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T15:36:53,215 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733845013215"}]},"ts":"1733845013215"} 2024-12-10T15:36:53,216 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-10T15:36:53,237 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=61, ppid=60, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=614727b67ed1c48d9acfd143d8b127a7, ASSIGN}] 2024-12-10T15:36:53,238 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=61, ppid=60, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=614727b67ed1c48d9acfd143d8b127a7, ASSIGN 2024-12-10T15:36:53,239 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=61, ppid=60, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=614727b67ed1c48d9acfd143d8b127a7, ASSIGN; state=OFFLINE, location=bf0fec90ff6d,46239,1733844953049; forceNewPlan=false, retain=false 2024-12-10T15:36:53,389 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=61 updating hbase:meta row=614727b67ed1c48d9acfd143d8b127a7, regionState=OPENING, regionLocation=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:53,391 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; OpenRegionProcedure 614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049}] 2024-12-10T15:36:53,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=60 2024-12-10T15:36:53,542 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:53,545 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
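Once the region open that begins here finishes (the records that follow show the per-family store setup and the final "Opened" message), its placement on bf0fec90ff6d,46239 can be confirmed from a client with the RegionLocator API. A small illustrative sketch, not part of the test itself:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class PrintRegionLocations {
      public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(TableName.valueOf("TestAcidGuarantees"))) {
          for (HRegionLocation location : locator.getAllRegionLocations()) {
            // e.g. "614727b67ed1c48d9acfd143d8b127a7 -> bf0fec90ff6d,46239,1733844953049"
            System.out.println(location.getRegion().getEncodedName()
                + " -> " + location.getServerName());
          }
        }
      }
    }
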
2024-12-10T15:36:53,545 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7285): Opening region: {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} 2024-12-10T15:36:53,545 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:36:53,545 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T15:36:53,546 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7327): checking encryption for 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:36:53,546 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7330): checking classloading for 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:36:53,547 INFO [StoreOpener-614727b67ed1c48d9acfd143d8b127a7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:36:53,548 INFO [StoreOpener-614727b67ed1c48d9acfd143d8b127a7-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T15:36:53,548 INFO [StoreOpener-614727b67ed1c48d9acfd143d8b127a7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 614727b67ed1c48d9acfd143d8b127a7 columnFamilyName A 2024-12-10T15:36:53,548 DEBUG [StoreOpener-614727b67ed1c48d9acfd143d8b127a7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:53,552 INFO [StoreOpener-614727b67ed1c48d9acfd143d8b127a7-1 {}] regionserver.HStore(327): Store=614727b67ed1c48d9acfd143d8b127a7/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T15:36:53,552 INFO [StoreOpener-614727b67ed1c48d9acfd143d8b127a7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:36:53,553 INFO [StoreOpener-614727b67ed1c48d9acfd143d8b127a7-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T15:36:53,554 INFO [StoreOpener-614727b67ed1c48d9acfd143d8b127a7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 614727b67ed1c48d9acfd143d8b127a7 columnFamilyName B 2024-12-10T15:36:53,554 DEBUG [StoreOpener-614727b67ed1c48d9acfd143d8b127a7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:53,554 INFO [StoreOpener-614727b67ed1c48d9acfd143d8b127a7-1 {}] regionserver.HStore(327): Store=614727b67ed1c48d9acfd143d8b127a7/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T15:36:53,555 INFO [StoreOpener-614727b67ed1c48d9acfd143d8b127a7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:36:53,556 INFO [StoreOpener-614727b67ed1c48d9acfd143d8b127a7-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T15:36:53,556 INFO [StoreOpener-614727b67ed1c48d9acfd143d8b127a7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 614727b67ed1c48d9acfd143d8b127a7 columnFamilyName C 2024-12-10T15:36:53,556 DEBUG [StoreOpener-614727b67ed1c48d9acfd143d8b127a7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:36:53,557 INFO [StoreOpener-614727b67ed1c48d9acfd143d8b127a7-1 {}] regionserver.HStore(327): Store=614727b67ed1c48d9acfd143d8b127a7/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T15:36:53,557 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:36:53,560 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:36:53,561 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:36:53,564 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-10T15:36:53,565 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1085): writing seq id for 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:36:53,567 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T15:36:53,567 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1102): Opened 614727b67ed1c48d9acfd143d8b127a7; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73882539, jitterRate=0.10093562304973602}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T15:36:53,568 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1001): Region open journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:36:53,570 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., pid=62, masterSystemTime=1733845013542 2024-12-10T15:36:53,571 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:36:53,571 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
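[Editor's note] The store-opener entries above show the TestAcidGuarantees region coming online with three column families (A, B and C), each backed by a CompactingMemStore with an ADAPTIVE in-memory compaction policy and a 2.00 MB in-memory flush threshold. The test's own setup code is not part of this log; what follows is only a minimal sketch, assuming the standard HBase 2.x client API, of how a table with an equivalent layout could be declared. The table and family names are taken from the log; everything else (configuration source, absence of further family settings) is illustrative.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAcidTableSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {

      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));

      // One descriptor per family seen in the StoreOpener entries (A, B, C), each
      // using the ADAPTIVE in-memory compaction policy reported by CompactingMemStore.
      for (String family : new String[] { "A", "B", "C" }) {
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(family))
            .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
            .build();
        table.setColumnFamily(cf);
      }

      // Blocks until the master's CreateTableProcedure (pid=60 in this log) finishes,
      // which is what the repeated "Checking to see if procedure is done" polling reflects.
      admin.createTable(table.build());
    }
  }
}

[Editor's note] The synchronous createTable call above hides the same wait that the log records client-side as HBaseAdmin$TableFuture polling the master until pid=60 reaches SUCCESS.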
2024-12-10T15:36:53,575 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=61 updating hbase:meta row=614727b67ed1c48d9acfd143d8b127a7, regionState=OPEN, openSeqNum=2, regionLocation=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:53,578 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-12-10T15:36:53,578 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; OpenRegionProcedure 614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 in 186 msec 2024-12-10T15:36:53,580 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=61, resume processing ppid=60 2024-12-10T15:36:53,580 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, ppid=60, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=614727b67ed1c48d9acfd143d8b127a7, ASSIGN in 341 msec 2024-12-10T15:36:53,580 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=60, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T15:36:53,580 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733845013580"}]},"ts":"1733845013580"} 2024-12-10T15:36:53,581 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-10T15:36:53,622 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=60, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T15:36:53,623 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2640 sec 2024-12-10T15:36:54,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=60 2024-12-10T15:36:54,465 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 60 completed 2024-12-10T15:36:54,467 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6862e3ce to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@28e73c0 2024-12-10T15:36:54,471 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64ee0130, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:36:54,472 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:36:54,473 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58212, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:36:54,474 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-10T15:36:54,475 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48640, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-10T15:36:54,477 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d296fed to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7c480dfb 2024-12-10T15:36:54,488 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@683b64c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:36:54,490 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x08d0caa5 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@34cb3991 2024-12-10T15:36:54,496 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e55eb7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:36:54,497 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x43f04e0e to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2e9ae050 2024-12-10T15:36:54,505 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a703d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:36:54,506 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x560ec309 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2fef31f8 2024-12-10T15:36:54,513 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14ed1e44, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:36:54,514 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5886c0f2 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@eb04aeb 2024-12-10T15:36:54,521 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72537a47, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:36:54,522 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6a0e9c8f to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@36642cb 2024-12-10T15:36:54,531 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e998dd3, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:36:54,532 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d68f787 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c299cfb 2024-12-10T15:36:54,539 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e4c79b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:36:54,541 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x10e6bf6a to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@605827c9 2024-12-10T15:36:54,547 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d1403c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:36:54,548 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1730a60f to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3677bd4f 2024-12-10T15:36:54,563 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3bf0ba59, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:36:54,564 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x598cfed4 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@521aad6f 2024-12-10T15:36:54,571 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c86f707, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:36:54,595 DEBUG [hconnection-0x13a547a1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:36:54,600 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58218, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:36:54,639 DEBUG [hconnection-0x1f6aa33a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:36:54,641 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58220, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:36:54,648 DEBUG [hconnection-0x43c70700-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:36:54,656 INFO [RS-EventLoopGroup-3-3 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58222, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:36:54,659 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:36:54,660 DEBUG [hconnection-0x3db5d160-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:36:54,664 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58224, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:36:54,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees 2024-12-10T15:36:54,675 DEBUG [hconnection-0x3860fb6c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:36:54,676 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:36:54,676 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58232, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:36:54,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-10T15:36:54,683 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:36:54,683 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:36:54,687 DEBUG [hconnection-0x371f42d5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:36:54,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:36:54,689 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T15:36:54,693 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58248, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:36:54,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A 2024-12-10T15:36:54,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:54,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B 2024-12-10T15:36:54,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline 
suffix; before=1, new segment=null 2024-12-10T15:36:54,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=C 2024-12-10T15:36:54,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:54,698 DEBUG [hconnection-0x3b131804-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:36:54,699 DEBUG [hconnection-0x74586e67-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:36:54,700 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58250, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:36:54,701 DEBUG [hconnection-0x7e41a7db-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:36:54,701 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58264, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:36:54,702 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58278, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:36:54,707 DEBUG [hconnection-0x7efbe76d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:36:54,716 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58280, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:36:54,749 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/2f2b9765944b4f86abdc76f0a6efcc7f is 50, key is test_row_0/A:col10/1733845014685/Put/seqid=0 2024-12-10T15:36:54,770 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:54,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845074764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:54,771 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:54,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845074764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:54,771 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:54,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845074766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:54,779 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:54,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845074777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:54,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-10T15:36:54,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:54,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845074777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:54,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742064_1240 (size=12001) 2024-12-10T15:36:54,797 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/2f2b9765944b4f86abdc76f0a6efcc7f 2024-12-10T15:36:54,846 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:54,846 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-10T15:36:54,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:36:54,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:36:54,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:36:54,847 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:54,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:54,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:54,865 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/3e145ea3130246438c50ac752797bd20 is 50, key is test_row_0/B:col10/1733845014685/Put/seqid=0 2024-12-10T15:36:54,877 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:54,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845074876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:54,880 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:54,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845074878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:54,880 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:54,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845074878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:54,885 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:54,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845074883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:54,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:54,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845074891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:54,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742065_1241 (size=12001) 2024-12-10T15:36:54,910 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/3e145ea3130246438c50ac752797bd20 2024-12-10T15:36:54,947 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/bbd530eb12d544768f84d53b23713bf6 is 50, key is test_row_0/C:col10/1733845014685/Put/seqid=0 2024-12-10T15:36:54,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742066_1242 (size=12001) 2024-12-10T15:36:54,977 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/bbd530eb12d544768f84d53b23713bf6 2024-12-10T15:36:54,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-10T15:36:54,989 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/2f2b9765944b4f86abdc76f0a6efcc7f as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/2f2b9765944b4f86abdc76f0a6efcc7f 2024-12-10T15:36:54,998 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/2f2b9765944b4f86abdc76f0a6efcc7f, entries=150, sequenceid=13, filesize=11.7 K 2024-12-10T15:36:55,002 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/3e145ea3130246438c50ac752797bd20 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/3e145ea3130246438c50ac752797bd20 2024-12-10T15:36:55,012 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/3e145ea3130246438c50ac752797bd20, entries=150, sequenceid=13, filesize=11.7 K 2024-12-10T15:36:55,014 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/bbd530eb12d544768f84d53b23713bf6 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/bbd530eb12d544768f84d53b23713bf6 2024-12-10T15:36:55,014 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:55,016 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-10T15:36:55,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:36:55,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:36:55,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:36:55,016 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
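[Editor's note] The entries above show two things happening at once: the master-driven FlushTableProcedure (pid=63/64) is rejected by the region server because the MemStoreFlusher is already flushing the region ("NOT flushing ... as already flushing"), and concurrent writers are pushed back with RegionTooBusyException once the region's memstore exceeds the test-sized 512 KB blocking limit. RegionTooBusyException is a retryable IOException that the stock client normally handles internally; the following is only a hand-rolled sketch (assumed, not taken from the test source) of a put loop that backs off and retries when that exception surfaces to the caller, plus the Admin.flush call that corresponds to the "flush TestAcidGuarantees" request logged earlier. Table, family, qualifier and row names follow the log; retry counts and sleep times are arbitrary.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName name = TableName.valueOf("TestAcidGuarantees");

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(name);
         Admin admin = connection.getAdmin()) {

      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      // Retry with backoff when the server rejects the mutation because the region's
      // memstore is over its blocking limit (the "Over memstore limit" warnings above).
      // This assumes the exception reaches the caller rather than being retried away
      // by the client's own retry machinery.
      int attempts = 0;
      while (true) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          if (++attempts >= 10) {
            throw e; // give up after a bounded number of attempts
          }
          Thread.sleep(100L * attempts); // simple linear backoff
        }
      }

      // Equivalent of the "Client=jenkins//... flush TestAcidGuarantees" request:
      // asks the master to run a flush procedure for every region of the table.
      admin.flush(name);
    }
  }
}

[Editor's note] The explicit flush at the end mirrors the FlushTableProcedure stored as pid=63; as the log shows, such a request can legitimately fail and be retried while the region is already being flushed by the MemStoreFlusher.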
2024-12-10T15:36:55,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:55,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:55,030 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/bbd530eb12d544768f84d53b23713bf6, entries=150, sequenceid=13, filesize=11.7 K 2024-12-10T15:36:55,031 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 614727b67ed1c48d9acfd143d8b127a7 in 342ms, sequenceid=13, compaction requested=false 2024-12-10T15:36:55,031 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:36:55,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:36:55,081 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-10T15:36:55,082 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A 2024-12-10T15:36:55,082 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:55,082 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B 2024-12-10T15:36:55,082 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:55,082 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=C 2024-12-10T15:36:55,082 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:55,091 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/006a285487ed446e889df267e84413da is 50, key is test_row_0/A:col10/1733845014769/Put/seqid=0 2024-12-10T15:36:55,101 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:55,101 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:55,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845075097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:55,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845075097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:55,101 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:55,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845075097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:55,102 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:55,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845075100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:55,108 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:55,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845075104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:55,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742067_1243 (size=12001) 2024-12-10T15:36:55,124 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/006a285487ed446e889df267e84413da 2024-12-10T15:36:55,151 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/f18c2a6344ab4141b7441bce96dc4588 is 50, key is test_row_0/B:col10/1733845014769/Put/seqid=0 2024-12-10T15:36:55,169 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:55,171 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-10T15:36:55,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:36:55,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
as already flushing 2024-12-10T15:36:55,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:36:55,171 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:55,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:55,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:55,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742068_1244 (size=12001) 2024-12-10T15:36:55,175 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/f18c2a6344ab4141b7441bce96dc4588 2024-12-10T15:36:55,191 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/f0c50c30dd8a4798915552841789771b is 50, key is test_row_0/C:col10/1733845014769/Put/seqid=0 2024-12-10T15:36:55,208 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:55,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845075205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:55,208 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:55,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845075205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:55,210 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:55,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845075209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:55,211 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:55,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845075209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:55,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742069_1245 (size=12001) 2024-12-10T15:36:55,242 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/f0c50c30dd8a4798915552841789771b 2024-12-10T15:36:55,265 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/006a285487ed446e889df267e84413da as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/006a285487ed446e889df267e84413da 2024-12-10T15:36:55,274 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/006a285487ed446e889df267e84413da, entries=150, sequenceid=39, filesize=11.7 K 2024-12-10T15:36:55,276 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/f18c2a6344ab4141b7441bce96dc4588 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/f18c2a6344ab4141b7441bce96dc4588 2024-12-10T15:36:55,282 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/f18c2a6344ab4141b7441bce96dc4588, entries=150, sequenceid=39, filesize=11.7 K 2024-12-10T15:36:55,282 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/f0c50c30dd8a4798915552841789771b as 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/f0c50c30dd8a4798915552841789771b
2024-12-10T15:36:55,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63
2024-12-10T15:36:55,290 INFO  [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/f0c50c30dd8a4798915552841789771b, entries=150, sequenceid=39, filesize=11.7 K
2024-12-10T15:36:55,291 INFO  [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 614727b67ed1c48d9acfd143d8b127a7 in 210ms, sequenceid=39, compaction requested=false
2024-12-10T15:36:55,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7:
2024-12-10T15:36:55,324 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049
2024-12-10T15:36:55,328 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64
2024-12-10T15:36:55,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.
2024-12-10T15:36:55,328 INFO  [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-12-10T15:36:55,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A
2024-12-10T15:36:55,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T15:36:55,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B
2024-12-10T15:36:55,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T15:36:55,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=C
2024-12-10T15:36:55,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T15:36:55,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/59304ddb278d4a27bc0dedabef31064a is 50, key is test_row_0/A:col10/1733845015096/Put/seqid=0 2024-12-10T15:36:55,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742070_1246 (size=12001) 2024-12-10T15:36:55,347 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/59304ddb278d4a27bc0dedabef31064a 2024-12-10T15:36:55,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/9364ae9a176a49ac9a13a42e73e7196a is 50, key is test_row_0/B:col10/1733845015096/Put/seqid=0 2024-12-10T15:36:55,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742071_1247 (size=12001) 2024-12-10T15:36:55,386 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/9364ae9a176a49ac9a13a42e73e7196a 2024-12-10T15:36:55,411 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:36:55,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:36:55,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/10a6e43d41d74f5f9d36e847c453602b is 50, key is test_row_0/C:col10/1733845015096/Put/seqid=0 2024-12-10T15:36:55,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742072_1248 (size=12001) 2024-12-10T15:36:55,439 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:55,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845075436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:55,439 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:55,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845075436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:55,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:55,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845075437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:55,441 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:55,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845075437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:55,441 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:55,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845075439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:55,543 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:55,543 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:55,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845075542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:55,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845075542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:55,543 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:55,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845075542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:55,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:55,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845075545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:55,550 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:55,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845075550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:55,746 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:55,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845075744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:55,746 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:55,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845075745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:55,746 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:55,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845075746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:55,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:55,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845075751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:55,759 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:55,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845075754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:55,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-10T15:36:55,794 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-10T15:36:55,840 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/10a6e43d41d74f5f9d36e847c453602b 2024-12-10T15:36:55,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/59304ddb278d4a27bc0dedabef31064a as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/59304ddb278d4a27bc0dedabef31064a 2024-12-10T15:36:55,892 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/59304ddb278d4a27bc0dedabef31064a, entries=150, sequenceid=50, filesize=11.7 K 2024-12-10T15:36:55,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/9364ae9a176a49ac9a13a42e73e7196a as 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/9364ae9a176a49ac9a13a42e73e7196a 2024-12-10T15:36:55,925 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/9364ae9a176a49ac9a13a42e73e7196a, entries=150, sequenceid=50, filesize=11.7 K 2024-12-10T15:36:55,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/10a6e43d41d74f5f9d36e847c453602b as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/10a6e43d41d74f5f9d36e847c453602b 2024-12-10T15:36:55,935 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/10a6e43d41d74f5f9d36e847c453602b, entries=150, sequenceid=50, filesize=11.7 K 2024-12-10T15:36:55,940 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 614727b67ed1c48d9acfd143d8b127a7 in 612ms, sequenceid=50, compaction requested=true 2024-12-10T15:36:55,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:36:55,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
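The repeated RegionTooBusyException entries above are thrown by HRegion.checkResources() once the region's memstore passes its blocking threshold, reported here as 512.0 K. In a stock HBase deployment that threshold is the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier, so a run like this one presumably uses a deliberately tiny flush size to keep the flush and compaction paths under constant write pressure. The Java sketch below shows one way such a configuration could be assembled on the test or client side; the concrete values are assumptions chosen to work out to a 512 K limit, not values read from this log.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public final class SmallMemstoreConfig {
    // Build a Configuration whose memstore blocking limit works out to 512 KB,
    // matching the "Over memstore limit=512.0 K" messages in the log above.
    public static Configuration create() {
      Configuration conf = HBaseConfiguration.create();
      // Flush each memstore at 128 KB (assumed for illustration; the production default is 128 MB).
      conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
      // Block writers once the memstore exceeds flush.size * multiplier = 512 KB.
      conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
      return conf;
    }
  }

Writers that hit the limit keep receiving RegionTooBusyException until the flush recorded above frees memstore space, which is why the same client connections (ports 58220, 58222, 58250, 58264, 58280) keep reappearing with later deadlines as the client retries.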
2024-12-10T15:36:55,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=64 2024-12-10T15:36:55,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=64 2024-12-10T15:36:55,945 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-12-10T15:36:55,945 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2610 sec 2024-12-10T15:36:55,947 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees in 1.2870 sec 2024-12-10T15:36:56,053 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-10T15:36:56,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A 2024-12-10T15:36:56,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:56,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B 2024-12-10T15:36:56,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:56,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=C 2024-12-10T15:36:56,054 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:56,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:36:56,071 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:56,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845076067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:56,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:56,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845076067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:56,073 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/61538ec85bd942e29b56aa426aef718c is 50, key is test_row_0/A:col10/1733845015434/Put/seqid=0 2024-12-10T15:36:56,074 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:56,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845076069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:56,075 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:56,076 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:56,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845076071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:56,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845076070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:56,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742073_1249 (size=14341) 2024-12-10T15:36:56,109 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/61538ec85bd942e29b56aa426aef718c 2024-12-10T15:36:56,133 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/095b34fbc6a34196b2f45ffb1cd367a1 is 50, key is test_row_0/B:col10/1733845015434/Put/seqid=0 2024-12-10T15:36:56,198 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:56,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845076197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:56,198 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:56,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845076197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:56,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:56,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845076199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:56,201 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:56,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845076201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:56,204 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:56,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845076201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:56,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742074_1250 (size=12001) 2024-12-10T15:36:56,218 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/095b34fbc6a34196b2f45ffb1cd367a1 2024-12-10T15:36:56,237 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/da554007c3584eba9b6bdeadeb6f2fab is 50, key is test_row_0/C:col10/1733845015434/Put/seqid=0 2024-12-10T15:36:56,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742075_1251 (size=12001) 2024-12-10T15:36:56,261 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/da554007c3584eba9b6bdeadeb6f2fab 2024-12-10T15:36:56,268 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/61538ec85bd942e29b56aa426aef718c as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/61538ec85bd942e29b56aa426aef718c 2024-12-10T15:36:56,273 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/61538ec85bd942e29b56aa426aef718c, entries=200, sequenceid=76, filesize=14.0 K 2024-12-10T15:36:56,275 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/095b34fbc6a34196b2f45ffb1cd367a1 as 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/095b34fbc6a34196b2f45ffb1cd367a1 2024-12-10T15:36:56,290 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/095b34fbc6a34196b2f45ffb1cd367a1, entries=150, sequenceid=76, filesize=11.7 K 2024-12-10T15:36:56,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/da554007c3584eba9b6bdeadeb6f2fab as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/da554007c3584eba9b6bdeadeb6f2fab 2024-12-10T15:36:56,296 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/da554007c3584eba9b6bdeadeb6f2fab, entries=150, sequenceid=76, filesize=11.7 K 2024-12-10T15:36:56,296 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 614727b67ed1c48d9acfd143d8b127a7 in 243ms, sequenceid=76, compaction requested=true 2024-12-10T15:36:56,296 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:36:56,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:36:56,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:56,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:36:56,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:56,297 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:36:56,297 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:36:56,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:36:56,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:56,298 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 4 files of size 50344 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:36:56,298 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:36:56,298 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/B is initiating minor compaction (all files) 2024-12-10T15:36:56,298 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/A is initiating minor compaction (all files) 2024-12-10T15:36:56,298 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/A in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:36:56,298 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/B in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:36:56,298 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/3e145ea3130246438c50ac752797bd20, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/f18c2a6344ab4141b7441bce96dc4588, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/9364ae9a176a49ac9a13a42e73e7196a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/095b34fbc6a34196b2f45ffb1cd367a1] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=46.9 K 2024-12-10T15:36:56,298 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/2f2b9765944b4f86abdc76f0a6efcc7f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/006a285487ed446e889df267e84413da, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/59304ddb278d4a27bc0dedabef31064a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/61538ec85bd942e29b56aa426aef718c] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=49.2 K 2024-12-10T15:36:56,298 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 3e145ea3130246438c50ac752797bd20, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, 
earliestPutTs=1733845014685 2024-12-10T15:36:56,298 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2f2b9765944b4f86abdc76f0a6efcc7f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733845014685 2024-12-10T15:36:56,298 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting f18c2a6344ab4141b7441bce96dc4588, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733845014768 2024-12-10T15:36:56,299 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 006a285487ed446e889df267e84413da, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733845014768 2024-12-10T15:36:56,299 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 9364ae9a176a49ac9a13a42e73e7196a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733845015087 2024-12-10T15:36:56,299 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 59304ddb278d4a27bc0dedabef31064a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1733845015087 2024-12-10T15:36:56,299 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 61538ec85bd942e29b56aa426aef718c, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1733845015434 2024-12-10T15:36:56,299 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 095b34fbc6a34196b2f45ffb1cd367a1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1733845015434 2024-12-10T15:36:56,307 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#A#compaction#202 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:56,307 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#B#compaction#201 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:56,308 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/d68a9133e7d340548a812d05c513dce5 is 50, key is test_row_0/B:col10/1733845015434/Put/seqid=0 2024-12-10T15:36:56,309 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/ceaa648f99194f499d464373824f43a6 is 50, key is test_row_0/A:col10/1733845015434/Put/seqid=0 2024-12-10T15:36:56,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742076_1252 (size=12139) 2024-12-10T15:36:56,325 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/d68a9133e7d340548a812d05c513dce5 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/d68a9133e7d340548a812d05c513dce5 2024-12-10T15:36:56,331 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/B of 614727b67ed1c48d9acfd143d8b127a7 into d68a9133e7d340548a812d05c513dce5(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
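The compaction lines above show ExploringCompactionPolicy selecting all four eligible store files for stores A and B, PressureAwareThroughputController capping the work at 50.00 MB/second, and store B being rewritten into a single 11.9 K file. To reproduce or observe this behaviour outside the test harness, a compaction can be requested and polled through the public HBase 2.x Admin API; the sketch below is a minimal, assumed example (only the table name, TestAcidGuarantees, is taken from the log), not code used by this run.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.CompactionState;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public final class RequestCompaction {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection connection = ConnectionFactory.createConnection(conf);
           Admin admin = connection.getAdmin()) {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        // Ask the region servers to run a compaction on every store of the table.
        admin.compact(table);
        // Poll the aggregate compaction state until no compaction is pending or running.
        while (admin.getCompactionState(table) != CompactionState.NONE) {
          Thread.sleep(500);
        }
      }
    }
  }

Whether such a request ends up as a minor or major compaction is still decided server-side by the configured policy, so a file selection like the "Exploring compaction algorithm has selected 4 files" decision seen above would be made the same way for a client-initiated request.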
2024-12-10T15:36:56,331 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:36:56,331 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/B, priority=12, startTime=1733845016297; duration=0sec 2024-12-10T15:36:56,331 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:56,331 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:B 2024-12-10T15:36:56,331 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:36:56,334 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:36:56,334 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/C is initiating minor compaction (all files) 2024-12-10T15:36:56,334 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/C in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:36:56,334 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/bbd530eb12d544768f84d53b23713bf6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/f0c50c30dd8a4798915552841789771b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/10a6e43d41d74f5f9d36e847c453602b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/da554007c3584eba9b6bdeadeb6f2fab] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=46.9 K 2024-12-10T15:36:56,334 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting bbd530eb12d544768f84d53b23713bf6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733845014685 2024-12-10T15:36:56,335 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting f0c50c30dd8a4798915552841789771b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733845014768 2024-12-10T15:36:56,335 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 10a6e43d41d74f5f9d36e847c453602b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=50, earliestPutTs=1733845015087 2024-12-10T15:36:56,335 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting da554007c3584eba9b6bdeadeb6f2fab, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1733845015434 2024-12-10T15:36:56,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742077_1253 (size=12139) 2024-12-10T15:36:56,359 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#C#compaction#203 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:56,359 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/7e5bad6e79d042789ce04f08e31e7dc9 is 50, key is test_row_0/C:col10/1733845015434/Put/seqid=0 2024-12-10T15:36:56,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742078_1254 (size=12139) 2024-12-10T15:36:56,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:36:56,404 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T15:36:56,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A 2024-12-10T15:36:56,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:56,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B 2024-12-10T15:36:56,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:56,405 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=C 2024-12-10T15:36:56,405 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:56,407 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/7e5bad6e79d042789ce04f08e31e7dc9 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/7e5bad6e79d042789ce04f08e31e7dc9 2024-12-10T15:36:56,418 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/854dde02391b4d9f90192b053ae7f59f is 50, key is test_row_0/A:col10/1733845016068/Put/seqid=0 2024-12-10T15:36:56,426 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] 
regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/C of 614727b67ed1c48d9acfd143d8b127a7 into 7e5bad6e79d042789ce04f08e31e7dc9(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:36:56,426 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:36:56,426 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/C, priority=12, startTime=1733845016297; duration=0sec 2024-12-10T15:36:56,426 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:56,426 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:C 2024-12-10T15:36:56,442 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:56,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845076437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:56,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:56,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845076437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:56,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:56,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845076439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:56,444 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:56,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845076440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:56,445 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:56,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845076441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:56,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742079_1255 (size=12001) 2024-12-10T15:36:56,546 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:56,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845076544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:56,547 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:56,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845076544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:56,547 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:56,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845076544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:56,548 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:56,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845076545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:56,548 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:56,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845076546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:56,753 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/ceaa648f99194f499d464373824f43a6 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/ceaa648f99194f499d464373824f43a6 2024-12-10T15:36:56,754 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:56,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845076749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:56,756 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:56,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845076749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:56,756 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:56,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845076750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:56,757 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:56,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845076750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:56,755 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:56,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845076749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:56,761 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/A of 614727b67ed1c48d9acfd143d8b127a7 into ceaa648f99194f499d464373824f43a6(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:36:56,761 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:36:56,761 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/A, priority=12, startTime=1733845016297; duration=0sec 2024-12-10T15:36:56,761 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:56,761 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:A 2024-12-10T15:36:56,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-10T15:36:56,789 INFO [Thread-1117 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-12-10T15:36:56,791 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:36:56,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=65, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees 2024-12-10T15:36:56,796 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=65, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:36:56,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-10T15:36:56,797 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=65, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:36:56,797 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:36:56,867 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/854dde02391b4d9f90192b053ae7f59f 2024-12-10T15:36:56,883 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/de80b671530a4e408426cf0069d623a8 is 50, key is test_row_0/B:col10/1733845016068/Put/seqid=0 2024-12-10T15:36:56,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-10T15:36:56,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742080_1256 (size=12001) 2024-12-10T15:36:56,927 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/de80b671530a4e408426cf0069d623a8 2024-12-10T15:36:56,945 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/e55ef35ea0b54f029596fe18b30685df is 50, key is test_row_0/C:col10/1733845016068/Put/seqid=0 2024-12-10T15:36:56,955 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:56,956 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-10T15:36:56,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:36:56,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
as already flushing 2024-12-10T15:36:56,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:36:56,956 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:56,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:56,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:56,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742081_1257 (size=12001) 2024-12-10T15:36:57,000 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/e55ef35ea0b54f029596fe18b30685df 2024-12-10T15:36:57,011 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/854dde02391b4d9f90192b053ae7f59f as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/854dde02391b4d9f90192b053ae7f59f 2024-12-10T15:36:57,017 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/854dde02391b4d9f90192b053ae7f59f, entries=150, sequenceid=88, filesize=11.7 K 2024-12-10T15:36:57,018 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/de80b671530a4e408426cf0069d623a8 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/de80b671530a4e408426cf0069d623a8 
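[Editor's note] The repeated RegionTooBusyException entries above come from HRegion.checkResources: once a region's memstore grows past its blocking limit (hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier), new mutations are rejected until the flush shown in this log catches up. The "Over memstore limit=512.0 K" figure is consistent with a deliberately small test-scale flush size. A minimal sketch of such a configuration follows; the concrete values (128 KB flush size, multiplier 4) are assumptions for illustration and are not taken from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch: a configuration producing a small memstore blocking limit.
    // Blocking limit = flush.size * block.multiplier (here 128 KB * 4 = 512 KB),
    // matching the "Over memstore limit=512.0 K" messages above.
    public final class SmallMemstoreConf {
      static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // flush the region's memstores at ~128 KB (assumed)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block writes at 4x the flush size (default)
        return conf;
      }
    }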
2024-12-10T15:36:57,026 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/de80b671530a4e408426cf0069d623a8, entries=150, sequenceid=88, filesize=11.7 K 2024-12-10T15:36:57,027 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/e55ef35ea0b54f029596fe18b30685df as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/e55ef35ea0b54f029596fe18b30685df 2024-12-10T15:36:57,050 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/e55ef35ea0b54f029596fe18b30685df, entries=150, sequenceid=88, filesize=11.7 K 2024-12-10T15:36:57,054 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-12-10T15:36:57,058 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 614727b67ed1c48d9acfd143d8b127a7 in 654ms, sequenceid=88, compaction requested=false 2024-12-10T15:36:57,058 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:36:57,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:36:57,065 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-10T15:36:57,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A 2024-12-10T15:36:57,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:57,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B 2024-12-10T15:36:57,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:57,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=C 2024-12-10T15:36:57,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:57,084 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:57,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845077078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:57,087 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:57,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845077084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:57,087 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:57,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:57,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845077085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:57,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845077084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:57,088 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:57,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845077085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:57,089 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/aaa1ec7648fb45be94c25f794581c223 is 50, key is test_row_0/A:col10/1733845016435/Put/seqid=0 2024-12-10T15:36:57,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-10T15:36:57,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742082_1258 (size=12001) 2024-12-10T15:36:57,121 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:57,122 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/aaa1ec7648fb45be94c25f794581c223 2024-12-10T15:36:57,122 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-10T15:36:57,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:36:57,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:36:57,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
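[Editor's note] The FlushRegionCallable failures for pid=66 above ("Unable to complete flush ... as already flushing") are the region server declining a master-requested flush because the region is already being flushed by the MemStoreFlusher; the master logs the remote procedure as failed and re-dispatches it, which is why pid=66 is executed again at 15:36:57,122. The flush request itself originates from the admin call logged as "Client=jenkins//172.17.0.2 flush TestAcidGuarantees". A minimal sketch of that client-side call, assuming a standard Connection with default configuration, is:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Sketch of the admin-side flush that produces the FlushTableProcedure /
    // FlushRegionProcedure entries above (connection setup is assumed).
    public final class FlushTestTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          // Submits the table flush and waits for the master-side procedure
          // (e.g. pid=65 above) to complete.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }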
2024-12-10T15:36:57,122 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:57,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:57,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:57,140 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/2e46d7b8be8f469fb1c6340baee6416b is 50, key is test_row_0/B:col10/1733845016435/Put/seqid=0 2024-12-10T15:36:57,188 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:57,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845077187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:57,192 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:57,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845077192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:57,192 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:57,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845077192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:57,193 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:57,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845077192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:57,193 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:57,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845077192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:57,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742083_1259 (size=12001) 2024-12-10T15:36:57,194 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/2e46d7b8be8f469fb1c6340baee6416b 2024-12-10T15:36:57,205 DEBUG [master/bf0fec90ff6d:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region be7c8615eacea7669b98cff7543a195b changed from -1.0 to 0.0, refreshing cache 2024-12-10T15:36:57,216 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/bcc3d40b5af94cf48861476fa1b13307 is 50, key is test_row_0/C:col10/1733845016435/Put/seqid=0 2024-12-10T15:36:57,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742084_1260 (size=12001) 2024-12-10T15:36:57,253 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/bcc3d40b5af94cf48861476fa1b13307 2024-12-10T15:36:57,258 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/aaa1ec7648fb45be94c25f794581c223 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/aaa1ec7648fb45be94c25f794581c223 2024-12-10T15:36:57,263 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/aaa1ec7648fb45be94c25f794581c223, entries=150, sequenceid=117, filesize=11.7 K 2024-12-10T15:36:57,264 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/2e46d7b8be8f469fb1c6340baee6416b as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/2e46d7b8be8f469fb1c6340baee6416b 2024-12-10T15:36:57,272 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/2e46d7b8be8f469fb1c6340baee6416b, entries=150, sequenceid=117, filesize=11.7 K 2024-12-10T15:36:57,274 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/bcc3d40b5af94cf48861476fa1b13307 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/bcc3d40b5af94cf48861476fa1b13307 2024-12-10T15:36:57,280 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/bcc3d40b5af94cf48861476fa1b13307, entries=150, sequenceid=117, filesize=11.7 K 2024-12-10T15:36:57,281 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:57,282 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 614727b67ed1c48d9acfd143d8b127a7 in 218ms, sequenceid=117, compaction requested=true 2024-12-10T15:36:57,282 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:36:57,282 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:57,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:36:57,283 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:57,283 DEBUG 
[RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:57,283 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-10T15:36:57,284 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:57,284 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/A is initiating minor compaction (all files) 2024-12-10T15:36:57,284 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/A in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:36:57,284 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/ceaa648f99194f499d464373824f43a6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/854dde02391b4d9f90192b053ae7f59f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/aaa1ec7648fb45be94c25f794581c223] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=35.3 K 2024-12-10T15:36:57,285 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:57,285 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/B is initiating minor compaction (all files) 2024-12-10T15:36:57,285 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/B in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
2024-12-10T15:36:57,285 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/d68a9133e7d340548a812d05c513dce5, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/de80b671530a4e408426cf0069d623a8, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/2e46d7b8be8f469fb1c6340baee6416b] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=35.3 K 2024-12-10T15:36:57,285 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting ceaa648f99194f499d464373824f43a6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1733845015434 2024-12-10T15:36:57,286 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting d68a9133e7d340548a812d05c513dce5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1733845015434 2024-12-10T15:36:57,286 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 854dde02391b4d9f90192b053ae7f59f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1733845016068 2024-12-10T15:36:57,286 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting de80b671530a4e408426cf0069d623a8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1733845016068 2024-12-10T15:36:57,287 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting aaa1ec7648fb45be94c25f794581c223, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733845016435 2024-12-10T15:36:57,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
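At this point the master-requested flush (pid=66) and two minor compactions (stores A and B of region 614727b67ed1c48d9acfd143d8b127a7) are running concurrently on the region server. For reference only, the same flush-then-compact cycle can also be requested explicitly through the public Admin API; the sketch below is an assumption about how one might drive it from a client, not something this test does, and it presumes a cluster reachable with the default client configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompact {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.flush(table);    // flush the memstores of every region of the table
            admin.compact(table);  // queue a minor compaction, like the CompactSplit requests above
            // Poll until the region servers report that no compaction is in progress.
            while (admin.getCompactionState(table) != CompactionState.NONE) {
                Thread.sleep(500L);
            }
        }
    }
}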
2024-12-10T15:36:57,287 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-10T15:36:57,287 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e46d7b8be8f469fb1c6340baee6416b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733845016435 2024-12-10T15:36:57,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A 2024-12-10T15:36:57,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:57,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B 2024-12-10T15:36:57,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:57,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=C 2024-12-10T15:36:57,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:57,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/37865321957c4838bf4b6db6355cb1d1 is 50, key is test_row_0/A:col10/1733845017083/Put/seqid=0 2024-12-10T15:36:57,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:36:57,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:57,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:36:57,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:57,305 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#B#compaction#211 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:57,305 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/68fd6e4c146d4ac580252d608821885f is 50, key is test_row_0/B:col10/1733845016435/Put/seqid=0 2024-12-10T15:36:57,316 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#A#compaction#212 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:57,317 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/9cc500ab9fe64c89913f7d81e51d311d is 50, key is test_row_0/A:col10/1733845016435/Put/seqid=0 2024-12-10T15:36:57,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742085_1261 (size=12001) 2024-12-10T15:36:57,361 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/37865321957c4838bf4b6db6355cb1d1 2024-12-10T15:36:57,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742087_1263 (size=12241) 2024-12-10T15:36:57,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/95ac9a6335c54c678fbc4a0738791994 is 50, key is test_row_0/B:col10/1733845017083/Put/seqid=0 2024-12-10T15:36:57,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742086_1262 (size=12241) 2024-12-10T15:36:57,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-10T15:36:57,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742088_1264 (size=12001) 2024-12-10T15:36:57,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:36:57,421 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:36:57,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:57,462 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:57,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845077457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:57,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845077457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:57,463 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:57,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845077458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:57,466 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:57,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845077462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:57,467 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:57,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845077462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:57,565 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:57,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845077564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:57,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:57,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845077564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:57,569 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:57,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845077568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:57,570 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:57,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845077568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:57,574 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:57,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845077571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:57,769 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:57,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845077767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:57,769 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:57,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845077768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:57,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:57,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845077771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:57,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:57,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845077772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:57,781 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:57,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845077779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:57,807 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/9cc500ab9fe64c89913f7d81e51d311d as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/9cc500ab9fe64c89913f7d81e51d311d 2024-12-10T15:36:57,811 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/95ac9a6335c54c678fbc4a0738791994 2024-12-10T15:36:57,814 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/A of 614727b67ed1c48d9acfd143d8b127a7 into 9cc500ab9fe64c89913f7d81e51d311d(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
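The compaction of store A that just completed above rewrote three files totalling ~35.3 K into a single 12.0 K file, and the selection messages ("3 store files, 0 compacting, 3 eligible, 16 blocking") line up with the usual store-file thresholds. As a reference point, those thresholds correspond to the configuration keys sketched below; the values shown are the common defaults, given for illustration rather than read from this test's site configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // "3 eligible" matches the minimum file count for a minor compaction,
        // and "16 blocking" matches the store-file count at which further
        // updates to the region are delayed until compaction catches up.
        conf.setInt("hbase.hstore.compaction.min", 3);      // min eligible files per minor compaction
        conf.setInt("hbase.hstore.compaction.max", 10);     // max files merged in one compaction
        conf.setInt("hbase.hstore.blockingStoreFiles", 16); // blocking store-file threshold
        System.out.println("compaction.min = " + conf.get("hbase.hstore.compaction.min"));
    }
}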
2024-12-10T15:36:57,814 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:36:57,814 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/A, priority=13, startTime=1733845017282; duration=0sec 2024-12-10T15:36:57,814 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:57,814 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:A 2024-12-10T15:36:57,814 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:57,817 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:57,817 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/C is initiating minor compaction (all files) 2024-12-10T15:36:57,817 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/C in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:36:57,817 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/7e5bad6e79d042789ce04f08e31e7dc9, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/e55ef35ea0b54f029596fe18b30685df, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/bcc3d40b5af94cf48861476fa1b13307] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=35.3 K 2024-12-10T15:36:57,819 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e5bad6e79d042789ce04f08e31e7dc9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1733845015434 2024-12-10T15:36:57,820 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting e55ef35ea0b54f029596fe18b30685df, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1733845016068 2024-12-10T15:36:57,821 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting bcc3d40b5af94cf48861476fa1b13307, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733845016435 2024-12-10T15:36:57,824 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/68fd6e4c146d4ac580252d608821885f as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/68fd6e4c146d4ac580252d608821885f 2024-12-10T15:36:57,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/23a01f11915c44e9a12a12cffe03b6e8 is 50, key is test_row_0/C:col10/1733845017083/Put/seqid=0 2024-12-10T15:36:57,831 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/B of 614727b67ed1c48d9acfd143d8b127a7 into 68fd6e4c146d4ac580252d608821885f(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:36:57,831 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:36:57,831 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/B, priority=13, startTime=1733845017283; duration=0sec 2024-12-10T15:36:57,831 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:57,831 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:B 2024-12-10T15:36:57,837 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#C#compaction#215 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:57,838 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/125ea3cd8c454eee8061a374a46dba2b is 50, key is test_row_0/C:col10/1733845016435/Put/seqid=0 2024-12-10T15:36:57,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742089_1265 (size=12001) 2024-12-10T15:36:57,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-10T15:36:57,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742090_1266 (size=12241) 2024-12-10T15:36:58,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:58,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845078071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:58,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:58,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845078072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:58,076 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:58,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845078074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:58,076 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:58,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845078075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:58,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:58,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845078084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:58,285 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/23a01f11915c44e9a12a12cffe03b6e8 2024-12-10T15:36:58,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/37865321957c4838bf4b6db6355cb1d1 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/37865321957c4838bf4b6db6355cb1d1 2024-12-10T15:36:58,303 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/37865321957c4838bf4b6db6355cb1d1, entries=150, sequenceid=127, filesize=11.7 K 2024-12-10T15:36:58,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-12-10T15:36:58,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/95ac9a6335c54c678fbc4a0738791994 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/95ac9a6335c54c678fbc4a0738791994 2024-12-10T15:36:58,308 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/95ac9a6335c54c678fbc4a0738791994, entries=150, sequenceid=127, filesize=11.7 K 2024-12-10T15:36:58,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/23a01f11915c44e9a12a12cffe03b6e8 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/23a01f11915c44e9a12a12cffe03b6e8 2024-12-10T15:36:58,315 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/23a01f11915c44e9a12a12cffe03b6e8, entries=150, sequenceid=127, filesize=11.7 K 2024-12-10T15:36:58,315 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/125ea3cd8c454eee8061a374a46dba2b as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/125ea3cd8c454eee8061a374a46dba2b 2024-12-10T15:36:58,316 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=161.02 KB/164880 for 614727b67ed1c48d9acfd143d8b127a7 in 1028ms, sequenceid=127, compaction requested=false 2024-12-10T15:36:58,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:36:58,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
2024-12-10T15:36:58,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=66 2024-12-10T15:36:58,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=66 2024-12-10T15:36:58,318 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-12-10T15:36:58,319 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5200 sec 2024-12-10T15:36:58,321 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/C of 614727b67ed1c48d9acfd143d8b127a7 into 125ea3cd8c454eee8061a374a46dba2b(size=12.0 K), total size for store is 23.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:36:58,321 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:36:58,321 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/C, priority=13, startTime=1733845017298; duration=0sec 2024-12-10T15:36:58,322 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:58,322 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:C 2024-12-10T15:36:58,322 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees in 1.5300 sec 2024-12-10T15:36:58,580 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-10T15:36:58,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A 2024-12-10T15:36:58,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:58,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B 2024-12-10T15:36:58,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:58,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=C 2024-12-10T15:36:58,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:58,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:36:58,599 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:58,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:58,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845078593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:58,601 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:58,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845078593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:58,602 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:58,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845078595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:58,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845078594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:58,607 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:58,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845078597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:58,609 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/be3d5a7751bf49cf917d116c75606da2 is 50, key is test_row_0/A:col10/1733845017461/Put/seqid=0 2024-12-10T15:36:58,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742091_1267 (size=14541) 2024-12-10T15:36:58,706 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:58,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845078704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:58,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:58,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845078704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:58,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:58,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845078704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:58,708 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:58,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845078706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:58,712 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:58,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845078710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:58,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-10T15:36:58,905 INFO [Thread-1117 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 65 completed 2024-12-10T15:36:58,906 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:36:58,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees 2024-12-10T15:36:58,908 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:36:58,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-10T15:36:58,909 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:36:58,909 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:58,909 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:36:58,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845078908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:58,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:58,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845078909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:58,912 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:58,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845078909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:58,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:58,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845078911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:58,915 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:58,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845078914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:59,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-10T15:36:59,035 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/be3d5a7751bf49cf917d116c75606da2 2024-12-10T15:36:59,057 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/1bf99a1018744290adbc24085ae32dba is 50, key is test_row_0/B:col10/1733845017461/Put/seqid=0 2024-12-10T15:36:59,062 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:59,062 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-10T15:36:59,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:36:59,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:36:59,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:36:59,063 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:59,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:59,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:59,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742092_1268 (size=12151) 2024-12-10T15:36:59,096 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/1bf99a1018744290adbc24085ae32dba 2024-12-10T15:36:59,109 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/c125d7f832204a0fba7901e7c66be45c is 50, key is test_row_0/C:col10/1733845017461/Put/seqid=0 2024-12-10T15:36:59,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742093_1269 (size=12151) 2024-12-10T15:36:59,167 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/c125d7f832204a0fba7901e7c66be45c 2024-12-10T15:36:59,191 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/be3d5a7751bf49cf917d116c75606da2 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/be3d5a7751bf49cf917d116c75606da2 2024-12-10T15:36:59,198 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/be3d5a7751bf49cf917d116c75606da2, entries=200, sequenceid=158, filesize=14.2 K 2024-12-10T15:36:59,200 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/1bf99a1018744290adbc24085ae32dba as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/1bf99a1018744290adbc24085ae32dba 2024-12-10T15:36:59,205 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/1bf99a1018744290adbc24085ae32dba, entries=150, sequenceid=158, filesize=11.9 K 2024-12-10T15:36:59,206 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/c125d7f832204a0fba7901e7c66be45c as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/c125d7f832204a0fba7901e7c66be45c 2024-12-10T15:36:59,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-10T15:36:59,211 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/c125d7f832204a0fba7901e7c66be45c, entries=150, sequenceid=158, filesize=11.9 K 2024-12-10T15:36:59,215 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:59,215 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for 614727b67ed1c48d9acfd143d8b127a7 in 633ms, sequenceid=158, compaction requested=true 2024-12-10T15:36:59,215 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:36:59,216 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-10T15:36:59,216 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:59,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:36:59,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:59,216 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:36:59,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:36:59,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:36:59,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:36:59,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:36:59,217 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:59,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:36:59,217 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/B is initiating minor compaction (all files) 2024-12-10T15:36:59,217 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/B in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
2024-12-10T15:36:59,218 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/68fd6e4c146d4ac580252d608821885f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/95ac9a6335c54c678fbc4a0738791994, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/1bf99a1018744290adbc24085ae32dba] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=35.5 K 2024-12-10T15:36:59,218 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T15:36:59,218 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38783 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:36:59,218 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/A is initiating minor compaction (all files) 2024-12-10T15:36:59,218 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 68fd6e4c146d4ac580252d608821885f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733845016435 2024-12-10T15:36:59,218 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/A in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
2024-12-10T15:36:59,218 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/9cc500ab9fe64c89913f7d81e51d311d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/37865321957c4838bf4b6db6355cb1d1, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/be3d5a7751bf49cf917d116c75606da2] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=37.9 K 2024-12-10T15:36:59,219 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9cc500ab9fe64c89913f7d81e51d311d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733845016435 2024-12-10T15:36:59,219 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 95ac9a6335c54c678fbc4a0738791994, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1733845017078 2024-12-10T15:36:59,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:36:59,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:36:59,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:36:59,219 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:59,219 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 37865321957c4838bf4b6db6355cb1d1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1733845017078 2024-12-10T15:36:59,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:59,219 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 1bf99a1018744290adbc24085ae32dba, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733845017453 2024-12-10T15:36:59,220 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting be3d5a7751bf49cf917d116c75606da2, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733845017453 2024-12-10T15:36:59,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:59,220 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A 2024-12-10T15:36:59,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:59,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B 2024-12-10T15:36:59,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:59,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=C 2024-12-10T15:36:59,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:59,229 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/82c12cb5b15a463e828bd9b0452e1d45 is 50, key is test_row_0/A:col10/1733845019217/Put/seqid=0 2024-12-10T15:36:59,241 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#A#compaction#220 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:59,242 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/b02136ab569e4e9e82826a07bfa1a033 is 50, key is test_row_0/A:col10/1733845017461/Put/seqid=0 2024-12-10T15:36:59,244 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#B#compaction#221 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:59,245 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/e355b45e94424d0faa5f4b3a72732faa is 50, key is test_row_0/B:col10/1733845017461/Put/seqid=0 2024-12-10T15:36:59,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742095_1271 (size=12493) 2024-12-10T15:36:59,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:59,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845079263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:59,276 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:59,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845079268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:59,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742094_1270 (size=19321) 2024-12-10T15:36:59,276 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:59,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845079263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:59,277 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:59,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845079268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:59,277 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/82c12cb5b15a463e828bd9b0452e1d45 2024-12-10T15:36:59,277 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:59,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845079273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:59,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742096_1272 (size=12493) 2024-12-10T15:36:59,317 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/3c52a50e9e9b479fb0b5198aebc162dd is 50, key is test_row_0/B:col10/1733845019217/Put/seqid=0 2024-12-10T15:36:59,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742097_1273 (size=12151) 2024-12-10T15:36:59,353 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/3c52a50e9e9b479fb0b5198aebc162dd 2024-12-10T15:36:59,360 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/c8787b81890848499dc9a83534a5e2bd is 50, key is test_row_0/C:col10/1733845019217/Put/seqid=0 2024-12-10T15:36:59,386 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:59,387 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-10T15:36:59,387 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:36:59,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:36:59,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:36:59,388 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:59,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:36:59,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:36:59,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:59,390 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:59,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845079386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:59,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845079387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:59,392 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:59,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845079388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:59,394 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:59,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845079388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:59,394 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:59,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845079388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:59,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742098_1274 (size=12151) 2024-12-10T15:36:59,435 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/c8787b81890848499dc9a83534a5e2bd 2024-12-10T15:36:59,442 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/82c12cb5b15a463e828bd9b0452e1d45 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/82c12cb5b15a463e828bd9b0452e1d45 2024-12-10T15:36:59,449 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/82c12cb5b15a463e828bd9b0452e1d45, entries=300, sequenceid=170, filesize=18.9 K 2024-12-10T15:36:59,450 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/3c52a50e9e9b479fb0b5198aebc162dd as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/3c52a50e9e9b479fb0b5198aebc162dd 2024-12-10T15:36:59,455 
INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/3c52a50e9e9b479fb0b5198aebc162dd, entries=150, sequenceid=170, filesize=11.9 K 2024-12-10T15:36:59,457 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/c8787b81890848499dc9a83534a5e2bd as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/c8787b81890848499dc9a83534a5e2bd 2024-12-10T15:36:59,462 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/c8787b81890848499dc9a83534a5e2bd, entries=150, sequenceid=170, filesize=11.9 K 2024-12-10T15:36:59,463 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 614727b67ed1c48d9acfd143d8b127a7 in 245ms, sequenceid=170, compaction requested=true 2024-12-10T15:36:59,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:36:59,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:A, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:36:59,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-10T15:36:59,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:B, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:36:59,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-10T15:36:59,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:36:59,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-12-10T15:36:59,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-10T15:36:59,540 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:59,543 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-10T15:36:59,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:36:59,544 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-10T15:36:59,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A 2024-12-10T15:36:59,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:59,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B 2024-12-10T15:36:59,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:59,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=C 2024-12-10T15:36:59,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:36:59,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/7790bac6faa44985b61675e3d6556144 is 50, key is test_row_0/A:col10/1733845019260/Put/seqid=0 2024-12-10T15:36:59,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742099_1275 (size=12151) 2024-12-10T15:36:59,589 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/7790bac6faa44985b61675e3d6556144 2024-12-10T15:36:59,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/c895b77702e1412f88e895918d7fd017 is 50, key is test_row_0/B:col10/1733845019260/Put/seqid=0 2024-12-10T15:36:59,604 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
as already flushing 2024-12-10T15:36:59,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:36:59,617 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:59,617 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:59,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845079613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:59,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845079614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:59,617 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:59,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845079614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:59,619 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:59,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845079616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:59,621 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:59,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845079620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:59,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742100_1276 (size=12151) 2024-12-10T15:36:59,686 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/b02136ab569e4e9e82826a07bfa1a033 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/b02136ab569e4e9e82826a07bfa1a033 2024-12-10T15:36:59,701 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/A of 614727b67ed1c48d9acfd143d8b127a7 into b02136ab569e4e9e82826a07bfa1a033(size=12.2 K), total size for store is 31.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
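[Editorial aside, not part of the captured log] The RegionTooBusyException warnings above are the region server refusing writes while the region's memstore is over its blocking limit; the HBase client treats this as a retryable condition. Purely as an illustration (this code is not part of the test), the sketch below writes to the TestAcidGuarantees table with an extra backoff loop. The column family, qualifier, row key, and retry parameters are assumptions taken from the row keys visible in this log.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative only: a writer that backs off when the region reports it is too busy.
public class BusyRegionWriterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      putWithBackoff(table, put, 5, 200L);
    }
  }

  // The HBase client already retries RegionTooBusyException internally; this outer loop
  // only adds a longer pause once those internal retries are exhausted.
  static void putWithBackoff(Table table, Put put, int maxAttempts, long basePauseMs)
      throws IOException, InterruptedException {
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (IOException e) { // RegionTooBusyException, or a retries-exhausted wrapper around it
        if (attempt >= maxAttempts) {
          throw e;
        }
        Thread.sleep(basePauseMs * attempt); // linear backoff while the flush catches up
      }
    }
  }
}
```

In practice one would more often raise hbase.client.retries.number and hbase.client.pause on the client configuration rather than hand-rolling a loop like this; the sketch only makes the retry behavior visible.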
2024-12-10T15:36:59,701 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:36:59,701 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/A, priority=13, startTime=1733845019216; duration=0sec 2024-12-10T15:36:59,701 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-12-10T15:36:59,701 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:A 2024-12-10T15:36:59,701 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:A 2024-12-10T15:36:59,701 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:36:59,707 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48544 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:36:59,707 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/C is initiating minor compaction (all files) 2024-12-10T15:36:59,707 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/C in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
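[Editorial aside, not part of the captured log] The SortedCompactionPolicy/ExploringCompactionPolicy lines above show the server weighing permutations of store files before settling on the four C-family files. The following standalone sketch mimics that ratio-based selection in a deliberately simplified form; it is an illustration of the idea, not the actual HBase policy code, and the minFiles/maxFiles/ratio values and file sizes are assumed.

```java
import java.util.ArrayList;
import java.util.List;

// Simplified illustration of ratio-based store-file selection, loosely modeled on the
// "Exploring compaction" log lines above. Not the HBase implementation.
public class CompactionSelectionSketch {
  // Pick the contiguous window of file sizes that satisfies the ratio test and
  // covers the most files (ties broken by smaller total size).
  static List<Long> select(List<Long> sizes, int minFiles, int maxFiles, double ratio) {
    List<Long> best = new ArrayList<>();
    long bestTotal = Long.MAX_VALUE;
    for (int start = 0; start < sizes.size(); start++) {
      for (int end = start + minFiles; end <= Math.min(sizes.size(), start + maxFiles); end++) {
        List<Long> window = sizes.subList(start, end);
        if (!passesRatio(window, ratio)) {
          continue;
        }
        long total = window.stream().mapToLong(Long::longValue).sum();
        if (window.size() > best.size() || (window.size() == best.size() && total < bestTotal)) {
          best = new ArrayList<>(window);
          bestTotal = total;
        }
      }
    }
    return best;
  }

  // Ratio test: every file must be no larger than `ratio` times the sum of the others.
  static boolean passesRatio(List<Long> window, double ratio) {
    long total = window.stream().mapToLong(Long::longValue).sum();
    for (long size : window) {
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // File sizes roughly matching the four ~12 K C-store files listed below (assumed values).
    List<Long> sizes = List.of(12288L, 11980L, 12185L, 12151L);
    System.out.println(select(sizes, 3, 10, 1.2)); // similarly sized files -> all four selected
  }
}
```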
2024-12-10T15:36:59,707 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/125ea3cd8c454eee8061a374a46dba2b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/23a01f11915c44e9a12a12cffe03b6e8, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/c125d7f832204a0fba7901e7c66be45c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/c8787b81890848499dc9a83534a5e2bd] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=47.4 K 2024-12-10T15:36:59,711 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 125ea3cd8c454eee8061a374a46dba2b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733845016435 2024-12-10T15:36:59,712 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/e355b45e94424d0faa5f4b3a72732faa as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/e355b45e94424d0faa5f4b3a72732faa 2024-12-10T15:36:59,712 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 23a01f11915c44e9a12a12cffe03b6e8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1733845017078 2024-12-10T15:36:59,718 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/B of 614727b67ed1c48d9acfd143d8b127a7 into e355b45e94424d0faa5f4b3a72732faa(size=12.2 K), total size for store is 24.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
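[Editorial aside, not part of the captured log] The "Over memstore limit=512.0 K" figure in the recurring RegionTooBusyException warnings is the blocking threshold, which HBase derives as hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The exact settings used by this test run are not visible in the log; the sketch below only shows one assumed combination (128 K x 4) that would yield the same limit.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hypothetical test configuration illustrating where a 512 K blocking limit could come from.
// The concrete values are assumptions for illustration, not read from this log.
public class SmallMemstoreLimitConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush each memstore once it reaches 128 K (assumed value).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Block new writes once a memstore grows past flush.size * block.multiplier.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // 128 K * 4 = 512 K, matching the "Over memstore limit=512.0 K" warnings in this log.
    System.out.println("Blocking memstore limit (bytes): " + blockingLimit);
  }
}
```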
2024-12-10T15:36:59,718 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting c125d7f832204a0fba7901e7c66be45c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733845017453 2024-12-10T15:36:59,718 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:36:59,718 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/B, priority=13, startTime=1733845019216; duration=0sec 2024-12-10T15:36:59,718 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-10T15:36:59,718 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:B 2024-12-10T15:36:59,718 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:B 2024-12-10T15:36:59,719 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 4 compacting, 0 eligible, 16 blocking 2024-12-10T15:36:59,719 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-10T15:36:59,719 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-10T15:36:59,719 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. because compaction request was cancelled 2024-12-10T15:36:59,719 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:C 2024-12-10T15:36:59,719 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-10T15:36:59,720 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-10T15:36:59,720 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-10T15:36:59,720 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
because compaction request was cancelled 2024-12-10T15:36:59,720 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:B 2024-12-10T15:36:59,720 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-10T15:36:59,721 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-10T15:36:59,721 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-10T15:36:59,721 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. because compaction request was cancelled 2024-12-10T15:36:59,721 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:A 2024-12-10T15:36:59,721 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting c8787b81890848499dc9a83534a5e2bd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733845019215 2024-12-10T15:36:59,722 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:59,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845079720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:59,723 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:59,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845079720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:59,723 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:59,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845079720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:59,727 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:59,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845079723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:59,731 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#C#compaction#226 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:36:59,731 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/0d7ce1626e9842e9964b5ac2cb256d1a is 50, key is test_row_0/C:col10/1733845019217/Put/seqid=0 2024-12-10T15:36:59,734 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:59,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845079731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:59,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742101_1277 (size=12527) 2024-12-10T15:36:59,930 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:59,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845079927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:59,931 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:59,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845079928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:59,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:59,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845079928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:59,932 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:59,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845079929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:36:59,946 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:36:59,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845079943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:00,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-10T15:37:00,039 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/c895b77702e1412f88e895918d7fd017 2024-12-10T15:37:00,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/e15935728da546aa83be23d8db6bd1b5 is 50, key is test_row_0/C:col10/1733845019260/Put/seqid=0 2024-12-10T15:37:00,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742102_1278 (size=12151) 2024-12-10T15:37:00,187 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/0d7ce1626e9842e9964b5ac2cb256d1a as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/0d7ce1626e9842e9964b5ac2cb256d1a 2024-12-10T15:37:00,216 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 
614727b67ed1c48d9acfd143d8b127a7/C of 614727b67ed1c48d9acfd143d8b127a7 into 0d7ce1626e9842e9964b5ac2cb256d1a(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:37:00,216 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:00,216 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/C, priority=12, startTime=1733845019463; duration=0sec 2024-12-10T15:37:00,216 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:00,216 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:C 2024-12-10T15:37:00,237 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:00,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845080232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:00,242 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:00,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845080235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:00,242 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:00,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845080235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:00,247 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:00,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845080238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:00,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:00,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845080248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:00,520 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/e15935728da546aa83be23d8db6bd1b5 2024-12-10T15:37:00,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/7790bac6faa44985b61675e3d6556144 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/7790bac6faa44985b61675e3d6556144 2024-12-10T15:37:00,529 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/7790bac6faa44985b61675e3d6556144, entries=150, sequenceid=194, filesize=11.9 K 2024-12-10T15:37:00,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/c895b77702e1412f88e895918d7fd017 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/c895b77702e1412f88e895918d7fd017 2024-12-10T15:37:00,535 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/c895b77702e1412f88e895918d7fd017, entries=150, sequenceid=194, filesize=11.9 K 2024-12-10T15:37:00,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/e15935728da546aa83be23d8db6bd1b5 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/e15935728da546aa83be23d8db6bd1b5 2024-12-10T15:37:00,540 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/e15935728da546aa83be23d8db6bd1b5, entries=150, sequenceid=194, filesize=11.9 K 2024-12-10T15:37:00,541 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 614727b67ed1c48d9acfd143d8b127a7 in 998ms, sequenceid=194, compaction requested=true 2024-12-10T15:37:00,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:00,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:00,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=68 2024-12-10T15:37:00,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=68 2024-12-10T15:37:00,542 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-12-10T15:37:00,543 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6330 sec 2024-12-10T15:37:00,543 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees in 1.6370 sec 2024-12-10T15:37:00,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:37:00,746 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-10T15:37:00,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A 2024-12-10T15:37:00,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:00,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B 2024-12-10T15:37:00,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:00,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
614727b67ed1c48d9acfd143d8b127a7, store=C 2024-12-10T15:37:00,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:00,757 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/db0e906b9171480dbb2b362c24a8672c is 50, key is test_row_0/A:col10/1733845020744/Put/seqid=0 2024-12-10T15:37:00,779 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:00,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845080774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:00,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:00,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845080775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:00,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:00,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845080776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:00,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:00,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845080776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:00,781 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:00,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845080777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:00,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742103_1279 (size=12151) 2024-12-10T15:37:00,884 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:00,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845080883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:00,884 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:00,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845080884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:00,885 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:00,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845080884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:00,885 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:00,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845080884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:00,895 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:00,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845080895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:01,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-10T15:37:01,021 INFO [Thread-1117 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-12-10T15:37:01,024 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:37:01,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees 2024-12-10T15:37:01,034 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:37:01,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-10T15:37:01,039 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:37:01,039 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:37:01,089 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:01,089 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:01,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845081086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:01,089 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:01,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845081086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:01,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845081086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:01,089 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:01,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845081086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:01,103 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:01,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845081099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:01,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-10T15:37:01,205 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/db0e906b9171480dbb2b362c24a8672c 2024-12-10T15:37:01,207 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:01,208 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-10T15:37:01,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:01,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:01,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
2024-12-10T15:37:01,208 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:01,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:01,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:01,248 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/c76c41e72026487ebbf953fe57e99149 is 50, key is test_row_0/B:col10/1733845020744/Put/seqid=0 2024-12-10T15:37:01,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742104_1280 (size=12151) 2024-12-10T15:37:01,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-10T15:37:01,362 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:01,367 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-10T15:37:01,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:01,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:01,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
2024-12-10T15:37:01,368 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:01,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:01,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:01,395 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:01,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845081394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:01,399 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:01,396 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:01,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845081397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:01,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845081394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:01,409 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:01,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845081408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:01,412 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:01,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845081410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:01,522 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:01,523 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-10T15:37:01,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:01,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:01,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:01,523 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:01,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:01,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:01,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-10T15:37:01,678 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:01,678 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-10T15:37:01,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:01,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:01,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:01,678 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:01,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:01,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:01,685 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/c76c41e72026487ebbf953fe57e99149 2024-12-10T15:37:01,701 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/23f9cf36b14645aabac352c637505f90 is 50, key is test_row_0/C:col10/1733845020744/Put/seqid=0 2024-12-10T15:37:01,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742105_1281 (size=12151) 2024-12-10T15:37:01,732 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/23f9cf36b14645aabac352c637505f90 2024-12-10T15:37:01,749 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/db0e906b9171480dbb2b362c24a8672c as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/db0e906b9171480dbb2b362c24a8672c 2024-12-10T15:37:01,755 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/db0e906b9171480dbb2b362c24a8672c, entries=150, sequenceid=210, filesize=11.9 K 2024-12-10T15:37:01,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/c76c41e72026487ebbf953fe57e99149 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/c76c41e72026487ebbf953fe57e99149 2024-12-10T15:37:01,762 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/c76c41e72026487ebbf953fe57e99149, entries=150, sequenceid=210, filesize=11.9 K 2024-12-10T15:37:01,764 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/23f9cf36b14645aabac352c637505f90 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/23f9cf36b14645aabac352c637505f90 2024-12-10T15:37:01,768 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/23f9cf36b14645aabac352c637505f90, entries=150, sequenceid=210, filesize=11.9 K 2024-12-10T15:37:01,770 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 614727b67ed1c48d9acfd143d8b127a7 in 1025ms, sequenceid=210, compaction requested=true 2024-12-10T15:37:01,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:01,770 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:37:01,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:37:01,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:01,772 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 56116 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:37:01,772 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/A is initiating minor compaction (all files) 2024-12-10T15:37:01,772 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/A in 
TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:01,772 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/b02136ab569e4e9e82826a07bfa1a033, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/82c12cb5b15a463e828bd9b0452e1d45, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/7790bac6faa44985b61675e3d6556144, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/db0e906b9171480dbb2b362c24a8672c] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=54.8 K 2024-12-10T15:37:01,772 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting b02136ab569e4e9e82826a07bfa1a033, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733845017453 2024-12-10T15:37:01,773 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:37:01,773 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 82c12cb5b15a463e828bd9b0452e1d45, keycount=300, bloomtype=ROW, size=18.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733845018592 2024-12-10T15:37:01,774 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7790bac6faa44985b61675e3d6556144, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733845019254 2024-12-10T15:37:01,774 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting db0e906b9171480dbb2b362c24a8672c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733845019613 2024-12-10T15:37:01,775 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:37:01,775 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/B is initiating minor compaction (all files) 2024-12-10T15:37:01,775 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/B in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
2024-12-10T15:37:01,775 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/e355b45e94424d0faa5f4b3a72732faa, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/3c52a50e9e9b479fb0b5198aebc162dd, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/c895b77702e1412f88e895918d7fd017, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/c76c41e72026487ebbf953fe57e99149] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=47.8 K 2024-12-10T15:37:01,776 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting e355b45e94424d0faa5f4b3a72732faa, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733845017453 2024-12-10T15:37:01,777 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 3c52a50e9e9b479fb0b5198aebc162dd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733845019215 2024-12-10T15:37:01,777 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting c895b77702e1412f88e895918d7fd017, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733845019254 2024-12-10T15:37:01,778 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting c76c41e72026487ebbf953fe57e99149, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733845019613 2024-12-10T15:37:01,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:37:01,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:01,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:37:01,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:01,799 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#A#compaction#231 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:01,803 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/bdbb7f1cb2ca45328d2fc1aec4ea263b is 50, key is test_row_0/A:col10/1733845020744/Put/seqid=0 2024-12-10T15:37:01,805 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#B#compaction#232 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:01,806 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/09f41356b7024061a3646360ee556066 is 50, key is test_row_0/B:col10/1733845020744/Put/seqid=0 2024-12-10T15:37:01,830 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:01,831 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-10T15:37:01,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:01,831 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-10T15:37:01,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A 2024-12-10T15:37:01,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:01,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B 2024-12-10T15:37:01,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:01,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=C 2024-12-10T15:37:01,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:01,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742107_1283 (size=12629) 2024-12-10T15:37:01,853 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/43656bf744c84a72b870556eef5b2732 is 50, key is test_row_0/A:col10/1733845020776/Put/seqid=0 2024-12-10T15:37:01,857 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/09f41356b7024061a3646360ee556066 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/09f41356b7024061a3646360ee556066 2024-12-10T15:37:01,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742106_1282 (size=12629) 2024-12-10T15:37:01,873 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/B of 614727b67ed1c48d9acfd143d8b127a7 into 09f41356b7024061a3646360ee556066(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:37:01,873 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:01,873 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/B, priority=12, startTime=1733845021771; duration=0sec 2024-12-10T15:37:01,874 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:01,874 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:B 2024-12-10T15:37:01,874 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:37:01,881 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/bdbb7f1cb2ca45328d2fc1aec4ea263b as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/bdbb7f1cb2ca45328d2fc1aec4ea263b 2024-12-10T15:37:01,884 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:37:01,884 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/C is initiating minor compaction (all files) 2024-12-10T15:37:01,886 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] 
regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/C in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:01,889 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/0d7ce1626e9842e9964b5ac2cb256d1a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/e15935728da546aa83be23d8db6bd1b5, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/23f9cf36b14645aabac352c637505f90] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=36.0 K 2024-12-10T15:37:01,892 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d7ce1626e9842e9964b5ac2cb256d1a, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733845019215 2024-12-10T15:37:01,892 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting e15935728da546aa83be23d8db6bd1b5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733845019254 2024-12-10T15:37:01,893 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 23f9cf36b14645aabac352c637505f90, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733845019613 2024-12-10T15:37:01,896 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/A of 614727b67ed1c48d9acfd143d8b127a7 into bdbb7f1cb2ca45328d2fc1aec4ea263b(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:37:01,897 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:01,897 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/A, priority=12, startTime=1733845021770; duration=0sec 2024-12-10T15:37:01,897 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:01,897 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:A 2024-12-10T15:37:01,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742108_1284 (size=12151) 2024-12-10T15:37:01,900 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/43656bf744c84a72b870556eef5b2732 2024-12-10T15:37:01,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:37:01,906 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:01,920 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:01,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845081916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:01,924 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:01,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845081920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:01,924 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:01,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845081921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:01,925 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:01,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845081921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:01,925 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:01,925 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#C#compaction#234 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:01,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845081923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:01,925 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/cca64d76de49403c84597a8c98025274 is 50, key is test_row_0/C:col10/1733845020744/Put/seqid=0 2024-12-10T15:37:01,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/3452520c3e4541529c283d75403e8a38 is 50, key is test_row_0/B:col10/1733845020776/Put/seqid=0 2024-12-10T15:37:01,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742109_1285 (size=12629) 2024-12-10T15:37:01,983 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/cca64d76de49403c84597a8c98025274 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/cca64d76de49403c84597a8c98025274 2024-12-10T15:37:01,992 INFO 
[RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/C of 614727b67ed1c48d9acfd143d8b127a7 into cca64d76de49403c84597a8c98025274(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:37:01,992 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:01,992 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/C, priority=13, startTime=1733845021790; duration=0sec 2024-12-10T15:37:01,992 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:01,992 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:C 2024-12-10T15:37:02,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742110_1286 (size=12151) 2024-12-10T15:37:02,005 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/3452520c3e4541529c283d75403e8a38 2024-12-10T15:37:02,026 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:02,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845082024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:02,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/bba8d9ab8fd04a0b92394be3bd007a47 is 50, key is test_row_0/C:col10/1733845020776/Put/seqid=0 2024-12-10T15:37:02,030 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:02,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845082027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:02,042 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:02,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845082037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:02,042 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:02,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845082037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:02,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742111_1287 (size=12151) 2024-12-10T15:37:02,080 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/bba8d9ab8fd04a0b92394be3bd007a47 2024-12-10T15:37:02,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/43656bf744c84a72b870556eef5b2732 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/43656bf744c84a72b870556eef5b2732 2024-12-10T15:37:02,101 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/43656bf744c84a72b870556eef5b2732, entries=150, sequenceid=233, filesize=11.9 K 2024-12-10T15:37:02,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/3452520c3e4541529c283d75403e8a38 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/3452520c3e4541529c283d75403e8a38 2024-12-10T15:37:02,120 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/3452520c3e4541529c283d75403e8a38, entries=150, sequenceid=233, filesize=11.9 K 2024-12-10T15:37:02,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/bba8d9ab8fd04a0b92394be3bd007a47 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/bba8d9ab8fd04a0b92394be3bd007a47 2024-12-10T15:37:02,128 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/bba8d9ab8fd04a0b92394be3bd007a47, entries=150, sequenceid=233, filesize=11.9 K 2024-12-10T15:37:02,132 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 614727b67ed1c48d9acfd143d8b127a7 in 301ms, sequenceid=233, compaction requested=false 2024-12-10T15:37:02,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:02,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:02,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-10T15:37:02,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-12-10T15:37:02,135 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-12-10T15:37:02,135 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0940 sec 2024-12-10T15:37:02,137 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees in 1.1120 sec 2024-12-10T15:37:02,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-10T15:37:02,138 INFO [Thread-1117 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 69 completed 2024-12-10T15:37:02,144 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:37:02,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-12-10T15:37:02,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-10T15:37:02,147 INFO [PEWorker-3 {}] 
procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:37:02,148 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:37:02,148 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:37:02,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:37:02,235 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-10T15:37:02,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A 2024-12-10T15:37:02,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:02,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B 2024-12-10T15:37:02,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:02,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=C 2024-12-10T15:37:02,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:02,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-10T15:37:02,253 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/eeede55931bc4846bb14611e72c9ce80 is 50, key is test_row_0/A:col10/1733845022233/Put/seqid=0 2024-12-10T15:37:02,269 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:02,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845082266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:02,270 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:02,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845082266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:02,270 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:02,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845082267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:02,270 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:02,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845082266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:02,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742112_1288 (size=12151) 2024-12-10T15:37:02,300 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:02,303 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-10T15:37:02,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:02,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:02,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:02,304 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:02,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:02,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:02,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:02,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845082372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:02,374 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:02,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845082372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:02,374 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:02,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845082372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:02,377 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:02,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845082375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:02,416 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-10T15:37:02,416 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-10T15:37:02,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-10T15:37:02,456 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:02,456 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-10T15:37:02,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:02,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:02,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
2024-12-10T15:37:02,457 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:02,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:02,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:02,577 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:02,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845082575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:02,577 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:02,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845082575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:02,580 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:02,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845082579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:02,580 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:02,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845082579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:02,613 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:02,614 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-10T15:37:02,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:02,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:02,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:02,614 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:02,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:02,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:02,691 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/eeede55931bc4846bb14611e72c9ce80 2024-12-10T15:37:02,716 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/6c7e44693875471faa3d3711c8aad67a is 50, key is test_row_0/B:col10/1733845022233/Put/seqid=0 2024-12-10T15:37:02,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-10T15:37:02,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742113_1289 (size=12151) 2024-12-10T15:37:02,770 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:02,770 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-10T15:37:02,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:02,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:02,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:02,770 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:02,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:02,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:02,879 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:02,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845082878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:02,881 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:02,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845082880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:02,883 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:02,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845082882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:02,884 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:02,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845082883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:02,923 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:02,923 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-10T15:37:02,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:02,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:02,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:02,923 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:02,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:02,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:02,939 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:02,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845082938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:03,081 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:03,082 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-10T15:37:03,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:03,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
as already flushing 2024-12-10T15:37:03,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:03,082 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:03,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:03,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:03,162 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/6c7e44693875471faa3d3711c8aad67a 2024-12-10T15:37:03,174 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/c3b7ceaf1dd44c6eaed01a9ae5c180c2 is 50, key is test_row_0/C:col10/1733845022233/Put/seqid=0 2024-12-10T15:37:03,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742114_1290 (size=12151) 2024-12-10T15:37:03,235 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:03,239 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-10T15:37:03,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:03,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
as already flushing 2024-12-10T15:37:03,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:03,240 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:03,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:03,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:03,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-10T15:37:03,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:03,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845083383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:03,388 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:03,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845083386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:03,389 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:03,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845083386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:03,389 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:03,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845083387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:03,392 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:03,392 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-10T15:37:03,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:03,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:03,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:03,392 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:03,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:03,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:03,544 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:03,545 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-10T15:37:03,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:03,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:03,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:03,545 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:03,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:03,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:03,613 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=252 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/c3b7ceaf1dd44c6eaed01a9ae5c180c2 2024-12-10T15:37:03,623 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/eeede55931bc4846bb14611e72c9ce80 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/eeede55931bc4846bb14611e72c9ce80 2024-12-10T15:37:03,628 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/eeede55931bc4846bb14611e72c9ce80, entries=150, sequenceid=252, filesize=11.9 K 2024-12-10T15:37:03,629 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/6c7e44693875471faa3d3711c8aad67a as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/6c7e44693875471faa3d3711c8aad67a 2024-12-10T15:37:03,635 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/6c7e44693875471faa3d3711c8aad67a, entries=150, sequenceid=252, filesize=11.9 K 2024-12-10T15:37:03,636 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/c3b7ceaf1dd44c6eaed01a9ae5c180c2 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/c3b7ceaf1dd44c6eaed01a9ae5c180c2 2024-12-10T15:37:03,640 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/c3b7ceaf1dd44c6eaed01a9ae5c180c2, entries=150, sequenceid=252, filesize=11.9 K 2024-12-10T15:37:03,651 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 614727b67ed1c48d9acfd143d8b127a7 in 1416ms, sequenceid=252, compaction requested=true 2024-12-10T15:37:03,651 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:03,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:37:03,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:03,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:37:03,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-10T15:37:03,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:37:03,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-10T15:37:03,652 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:37:03,655 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:37:03,656 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:37:03,656 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/A is initiating minor compaction (all files) 2024-12-10T15:37:03,656 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/A in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
2024-12-10T15:37:03,656 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/bdbb7f1cb2ca45328d2fc1aec4ea263b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/43656bf744c84a72b870556eef5b2732, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/eeede55931bc4846bb14611e72c9ce80] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=36.1 K 2024-12-10T15:37:03,656 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting bdbb7f1cb2ca45328d2fc1aec4ea263b, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733845019613 2024-12-10T15:37:03,657 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 43656bf744c84a72b870556eef5b2732, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1733845020772 2024-12-10T15:37:03,657 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting eeede55931bc4846bb14611e72c9ce80, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733845021919 2024-12-10T15:37:03,667 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:37:03,667 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/C is initiating minor compaction (all files) 2024-12-10T15:37:03,667 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/C in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
2024-12-10T15:37:03,668 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/cca64d76de49403c84597a8c98025274, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/bba8d9ab8fd04a0b92394be3bd007a47, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/c3b7ceaf1dd44c6eaed01a9ae5c180c2] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=36.1 K 2024-12-10T15:37:03,671 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting cca64d76de49403c84597a8c98025274, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733845019613 2024-12-10T15:37:03,675 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting bba8d9ab8fd04a0b92394be3bd007a47, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1733845020772 2024-12-10T15:37:03,679 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting c3b7ceaf1dd44c6eaed01a9ae5c180c2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733845021919 2024-12-10T15:37:03,696 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#A#compaction#240 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:03,697 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/e344aeb622ba4f16b7fad6ac63b86291 is 50, key is test_row_0/A:col10/1733845022233/Put/seqid=0 2024-12-10T15:37:03,697 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:03,697 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-10T15:37:03,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
2024-12-10T15:37:03,697 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-10T15:37:03,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A 2024-12-10T15:37:03,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:03,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B 2024-12-10T15:37:03,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:03,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=C 2024-12-10T15:37:03,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:03,701 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#C#compaction#241 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:03,701 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/7b1325cdcc9e4ea4b9896d5985de1bd2 is 50, key is test_row_0/C:col10/1733845022233/Put/seqid=0 2024-12-10T15:37:03,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/8172eaf28e95466fbbc4325c80357180 is 50, key is test_row_0/A:col10/1733845022266/Put/seqid=0 2024-12-10T15:37:03,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742115_1291 (size=12731) 2024-12-10T15:37:03,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742116_1292 (size=12731) 2024-12-10T15:37:03,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742117_1293 (size=12301) 2024-12-10T15:37:03,824 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/8172eaf28e95466fbbc4325c80357180 2024-12-10T15:37:03,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/1e57a8f0494a46cb9d290a47590ef0d1 is 50, key is test_row_0/B:col10/1733845022266/Put/seqid=0 2024-12-10T15:37:03,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742118_1294 (size=12301) 2024-12-10T15:37:03,881 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/1e57a8f0494a46cb9d290a47590ef0d1 2024-12-10T15:37:03,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/80874c7492024df2852e30b9c51a8eea is 50, key is test_row_0/C:col10/1733845022266/Put/seqid=0 2024-12-10T15:37:03,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742119_1295 (size=12301) 2024-12-10T15:37:03,936 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/80874c7492024df2852e30b9c51a8eea 2024-12-10T15:37:03,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/8172eaf28e95466fbbc4325c80357180 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/8172eaf28e95466fbbc4325c80357180 2024-12-10T15:37:03,956 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/8172eaf28e95466fbbc4325c80357180, entries=150, sequenceid=272, filesize=12.0 K 2024-12-10T15:37:03,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/1e57a8f0494a46cb9d290a47590ef0d1 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/1e57a8f0494a46cb9d290a47590ef0d1 2024-12-10T15:37:03,970 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/1e57a8f0494a46cb9d290a47590ef0d1, entries=150, sequenceid=272, filesize=12.0 K 2024-12-10T15:37:03,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/80874c7492024df2852e30b9c51a8eea as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/80874c7492024df2852e30b9c51a8eea 2024-12-10T15:37:03,979 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/80874c7492024df2852e30b9c51a8eea, entries=150, sequenceid=272, filesize=12.0 K 2024-12-10T15:37:03,991 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=0 B/0 for 614727b67ed1c48d9acfd143d8b127a7 in 294ms, sequenceid=272, compaction requested=true 2024-12-10T15:37:03,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 
{event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:03,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:03,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-10T15:37:03,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-12-10T15:37:03,994 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-12-10T15:37:03,994 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8440 sec 2024-12-10T15:37:04,001 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 1.8550 sec 2024-12-10T15:37:04,159 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/e344aeb622ba4f16b7fad6ac63b86291 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/e344aeb622ba4f16b7fad6ac63b86291 2024-12-10T15:37:04,171 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/A of 614727b67ed1c48d9acfd143d8b127a7 into e344aeb622ba4f16b7fad6ac63b86291(size=12.4 K), total size for store is 24.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:37:04,171 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:04,171 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/A, priority=13, startTime=1733845023651; duration=0sec 2024-12-10T15:37:04,172 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:04,172 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:A 2024-12-10T15:37:04,172 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:37:04,174 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49232 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:37:04,174 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/B is initiating minor compaction (all files) 2024-12-10T15:37:04,175 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/B in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:04,175 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/09f41356b7024061a3646360ee556066, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/3452520c3e4541529c283d75403e8a38, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/6c7e44693875471faa3d3711c8aad67a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/1e57a8f0494a46cb9d290a47590ef0d1] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=48.1 K 2024-12-10T15:37:04,176 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 09f41356b7024061a3646360ee556066, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733845019613 2024-12-10T15:37:04,176 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 3452520c3e4541529c283d75403e8a38, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1733845020772 2024-12-10T15:37:04,176 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c7e44693875471faa3d3711c8aad67a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=252, earliestPutTs=1733845021919 2024-12-10T15:37:04,176 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e57a8f0494a46cb9d290a47590ef0d1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1733845022261 2024-12-10T15:37:04,185 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#B#compaction#245 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:04,186 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/42530832dbcc4d9a87d8297bca32d6f8 is 50, key is test_row_0/B:col10/1733845022266/Put/seqid=0 2024-12-10T15:37:04,211 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/7b1325cdcc9e4ea4b9896d5985de1bd2 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/7b1325cdcc9e4ea4b9896d5985de1bd2 2024-12-10T15:37:04,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742120_1296 (size=12915) 2024-12-10T15:37:04,231 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/C of 614727b67ed1c48d9acfd143d8b127a7 into 7b1325cdcc9e4ea4b9896d5985de1bd2(size=12.4 K), total size for store is 24.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:37:04,231 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:04,231 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/C, priority=13, startTime=1733845023652; duration=0sec 2024-12-10T15:37:04,231 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:04,231 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:C 2024-12-10T15:37:04,240 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/42530832dbcc4d9a87d8297bca32d6f8 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/42530832dbcc4d9a87d8297bca32d6f8 2024-12-10T15:37:04,259 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/B of 614727b67ed1c48d9acfd143d8b127a7 into 42530832dbcc4d9a87d8297bca32d6f8(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:37:04,259 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:04,259 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/B, priority=12, startTime=1733845023651; duration=0sec 2024-12-10T15:37:04,259 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:04,260 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:B 2024-12-10T15:37:04,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-10T15:37:04,264 INFO [Thread-1117 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-12-10T15:37:04,272 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:37:04,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-12-10T15:37:04,274 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:37:04,274 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:37:04,275 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:37:04,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-10T15:37:04,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-10T15:37:04,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:37:04,406 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T15:37:04,407 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A 2024-12-10T15:37:04,407 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:04,407 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B 2024-12-10T15:37:04,407 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:04,407 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=C 2024-12-10T15:37:04,407 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:04,417 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/7ed7f3285cd046c686e0d32641ac5fc8 is 50, key is test_row_0/A:col10/1733845024405/Put/seqid=0 2024-12-10T15:37:04,427 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:04,429 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-10T15:37:04,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:04,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
as already flushing 2024-12-10T15:37:04,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:04,429 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:04,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:04,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:04,437 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:04,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845084433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:04,437 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:04,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845084434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:04,438 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:04,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845084435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:04,438 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:04,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845084436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:04,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742121_1297 (size=12301) 2024-12-10T15:37:04,460 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/7ed7f3285cd046c686e0d32641ac5fc8 2024-12-10T15:37:04,475 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/1738f86653fa4aa0a20da42b41bf23bf is 50, key is test_row_0/B:col10/1733845024405/Put/seqid=0 2024-12-10T15:37:04,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742122_1298 (size=12301) 2024-12-10T15:37:04,540 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:04,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845084538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:04,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:04,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845084539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:04,543 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:04,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845084542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:04,549 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:04,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845084547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:04,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-10T15:37:04,585 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:04,585 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-10T15:37:04,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:04,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:04,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:04,585 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:04,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:04,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:04,738 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:04,739 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-10T15:37:04,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:04,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:04,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:04,740 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:04,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:04,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:04,746 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:04,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845084744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:04,749 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:04,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845084747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:04,753 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:04,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845084751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:04,758 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T15:37:04,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845084757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049
2024-12-10T15:37:04,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73
2024-12-10T15:37:04,892 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049
2024-12-10T15:37:04,892 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74
2024-12-10T15:37:04,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.
2024-12-10T15:37:04,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing
2024-12-10T15:37:04,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.
2024-12-10T15:37:04,892 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:37:04,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:37:04,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:04,926 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/1738f86653fa4aa0a20da42b41bf23bf 2024-12-10T15:37:04,939 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/66899716a987450ebc731a63c6576e28 is 50, key is test_row_0/C:col10/1733845024405/Put/seqid=0 2024-12-10T15:37:04,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:04,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845084942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:04,944 DEBUG [Thread-1107 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4169 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., hostname=bf0fec90ff6d,46239,1733844953049, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T15:37:04,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742123_1299 (size=12301) 2024-12-10T15:37:04,970 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/66899716a987450ebc731a63c6576e28 2024-12-10T15:37:04,974 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/7ed7f3285cd046c686e0d32641ac5fc8 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/7ed7f3285cd046c686e0d32641ac5fc8
2024-12-10T15:37:04,978 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/7ed7f3285cd046c686e0d32641ac5fc8, entries=150, sequenceid=287, filesize=12.0 K
2024-12-10T15:37:04,980 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/1738f86653fa4aa0a20da42b41bf23bf as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/1738f86653fa4aa0a20da42b41bf23bf
2024-12-10T15:37:04,983 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/1738f86653fa4aa0a20da42b41bf23bf, entries=150, sequenceid=287, filesize=12.0 K
2024-12-10T15:37:04,984 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/66899716a987450ebc731a63c6576e28 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/66899716a987450ebc731a63c6576e28
2024-12-10T15:37:04,996 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/66899716a987450ebc731a63c6576e28, entries=150, sequenceid=287, filesize=12.0 K
2024-12-10T15:37:05,000 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 614727b67ed1c48d9acfd143d8b127a7 in 593ms, sequenceid=287, compaction requested=true
2024-12-10T15:37:05,000 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7:
2024-12-10T15:37:05,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:A, priority=-2147483648, current under compaction store size is 1
2024-12-10T15:37:05,000 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-10T15:37:05,000 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-10T15:37:05,000 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking
2024-12-10T15:37:05,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:B, priority=-2147483648, current under compaction store size is 2
2024-12-10T15:37:05,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-10T15:37:05,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:C, priority=-2147483648, current under compaction store size is 3
2024-12-10T15:37:05,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-10T15:37:05,001 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio
2024-12-10T15:37:05,001 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate.
2024-12-10T15:37:05,001 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. because compaction request was cancelled
2024-12-10T15:37:05,001 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:B
2024-12-10T15:37:05,001 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-10T15:37:05,002 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-10T15:37:05,002 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/A is initiating minor compaction (all files)
2024-12-10T15:37:05,002 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/A in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.
2024-12-10T15:37:05,002 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/e344aeb622ba4f16b7fad6ac63b86291, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/8172eaf28e95466fbbc4325c80357180, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/7ed7f3285cd046c686e0d32641ac5fc8] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=36.5 K
2024-12-10T15:37:05,002 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-10T15:37:05,002 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/C is initiating minor compaction (all files)
2024-12-10T15:37:05,002 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/C in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.
2024-12-10T15:37:05,002 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/7b1325cdcc9e4ea4b9896d5985de1bd2, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/80874c7492024df2852e30b9c51a8eea, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/66899716a987450ebc731a63c6576e28] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=36.5 K
2024-12-10T15:37:05,003 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting e344aeb622ba4f16b7fad6ac63b86291, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733845021919
2024-12-10T15:37:05,003 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b1325cdcc9e4ea4b9896d5985de1bd2, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=252, earliestPutTs=1733845021919
2024-12-10T15:37:05,003 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8172eaf28e95466fbbc4325c80357180, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1733845022261
2024-12-10T15:37:05,003 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 80874c7492024df2852e30b9c51a8eea, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1733845022261
2024-12-10T15:37:05,004 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7ed7f3285cd046c686e0d32641ac5fc8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1733845024398
2024-12-10T15:37:05,004 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 66899716a987450ebc731a63c6576e28, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1733845024398
2024-12-10T15:37:05,025 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#A#compaction#249 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-10T15:37:05,026 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/0c559606029c41ab8250acc80b246c34 is 50, key is test_row_0/A:col10/1733845024405/Put/seqid=0
2024-12-10T15:37:05,028 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#C#compaction#250 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-10T15:37:05,028 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/116febc7ff1f4f4498e9975d83f6888e is 50, key is test_row_0/C:col10/1733845024405/Put/seqid=0
2024-12-10T15:37:05,044 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049
2024-12-10T15:37:05,045 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74
2024-12-10T15:37:05,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.
2024-12-10T15:37:05,045 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB
2024-12-10T15:37:05,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A
2024-12-10T15:37:05,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T15:37:05,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B
2024-12-10T15:37:05,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T15:37:05,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=C
2024-12-10T15:37:05,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T15:37:05,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 614727b67ed1c48d9acfd143d8b127a7
2024-12-10T15:37:05,051 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing
2024-12-10T15:37:05,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742125_1301 (size=12983)
2024-12-10T15:37:05,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742124_1300 (size=12983)
2024-12-10T15:37:05,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/4be977ac07af4777a7d303997903550f is 50, key is test_row_0/A:col10/1733845024432/Put/seqid=0
2024-12-10T15:37:05,067 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/116febc7ff1f4f4498e9975d83f6888e as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/116febc7ff1f4f4498e9975d83f6888e
2024-12-10T15:37:05,073 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/C of 614727b67ed1c48d9acfd143d8b127a7 into 116febc7ff1f4f4498e9975d83f6888e(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-10T15:37:05,073 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:05,073 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/C, priority=13, startTime=1733845025001; duration=0sec 2024-12-10T15:37:05,073 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:05,073 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:C 2024-12-10T15:37:05,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742126_1302 (size=12301) 2024-12-10T15:37:05,082 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:05,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:05,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845085077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:05,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845085077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:05,082 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:05,083 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:05,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845085079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:05,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845085077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:05,084 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/4be977ac07af4777a7d303997903550f 2024-12-10T15:37:05,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/4d30c3dcc02244149e523e630681674a is 50, key is test_row_0/B:col10/1733845024432/Put/seqid=0 2024-12-10T15:37:05,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742127_1303 (size=12301) 2024-12-10T15:37:05,186 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:05,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845085183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:05,189 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:05,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845085187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:05,190 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:05,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845085187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:05,191 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:05,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845085188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:05,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-10T15:37:05,388 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:05,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845085388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:05,395 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:05,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845085392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:05,397 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:05,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845085393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:05,397 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:05,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845085393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:05,472 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/0c559606029c41ab8250acc80b246c34 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/0c559606029c41ab8250acc80b246c34 2024-12-10T15:37:05,478 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/A of 614727b67ed1c48d9acfd143d8b127a7 into 0c559606029c41ab8250acc80b246c34(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:37:05,478 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:05,478 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/A, priority=13, startTime=1733845025000; duration=0sec 2024-12-10T15:37:05,478 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:05,478 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:A 2024-12-10T15:37:05,560 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/4d30c3dcc02244149e523e630681674a 2024-12-10T15:37:05,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/2bdc0d51d6184b578c5fcbd06c169d14 is 50, key is test_row_0/C:col10/1733845024432/Put/seqid=0 2024-12-10T15:37:05,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742128_1304 (size=12301) 2024-12-10T15:37:05,692 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:05,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845085689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:05,701 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:05,701 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:05,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845085699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:05,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845085699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:05,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:05,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845085703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:06,017 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/2bdc0d51d6184b578c5fcbd06c169d14 2024-12-10T15:37:06,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/4be977ac07af4777a7d303997903550f as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/4be977ac07af4777a7d303997903550f 2024-12-10T15:37:06,029 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/4be977ac07af4777a7d303997903550f, entries=150, sequenceid=311, filesize=12.0 K 2024-12-10T15:37:06,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/4d30c3dcc02244149e523e630681674a as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/4d30c3dcc02244149e523e630681674a 2024-12-10T15:37:06,034 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/4d30c3dcc02244149e523e630681674a, entries=150, sequenceid=311, filesize=12.0 K 2024-12-10T15:37:06,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/2bdc0d51d6184b578c5fcbd06c169d14 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/2bdc0d51d6184b578c5fcbd06c169d14 2024-12-10T15:37:06,042 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/2bdc0d51d6184b578c5fcbd06c169d14, entries=150, sequenceid=311, filesize=12.0 K 2024-12-10T15:37:06,043 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 614727b67ed1c48d9acfd143d8b127a7 in 998ms, sequenceid=311, compaction requested=true 2024-12-10T15:37:06,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:06,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:06,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-12-10T15:37:06,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-12-10T15:37:06,067 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-12-10T15:37:06,067 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7800 sec 2024-12-10T15:37:06,069 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 1.7960 sec 2024-12-10T15:37:06,201 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-10T15:37:06,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A 2024-12-10T15:37:06,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:06,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B 2024-12-10T15:37:06,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:06,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=C 2024-12-10T15:37:06,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-12-10T15:37:06,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:37:06,209 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/53d8faa8b3e5438186328f9ae6aa7755 is 50, key is test_row_0/A:col10/1733845026198/Put/seqid=0 2024-12-10T15:37:06,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742129_1305 (size=12301) 2024-12-10T15:37:06,236 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/53d8faa8b3e5438186328f9ae6aa7755 2024-12-10T15:37:06,260 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:06,260 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:06,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845086254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:06,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845086254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:06,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:06,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845086260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:06,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:06,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845086260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:06,282 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/e1c30c585b2846acad712328a01b5b23 is 50, key is test_row_0/B:col10/1733845026198/Put/seqid=0 2024-12-10T15:37:06,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742130_1306 (size=12301) 2024-12-10T15:37:06,362 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:06,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845086361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:06,362 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:06,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845086361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:06,365 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:06,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845086363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:06,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:06,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845086364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:06,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-10T15:37:06,386 INFO [Thread-1117 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-12-10T15:37:06,387 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:37:06,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-12-10T15:37:06,391 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:37:06,392 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:37:06,392 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:37:06,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-10T15:37:06,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-10T15:37:06,545 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:06,545 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-10T15:37:06,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
2024-12-10T15:37:06,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:06,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:06,547 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:06,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:06,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:06,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:06,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845086565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:06,570 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:06,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845086568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:06,570 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:06,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845086568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:06,574 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:06,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845086571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:06,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-10T15:37:06,711 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:06,712 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-10T15:37:06,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:06,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:06,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:06,712 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:06,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:06,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:06,716 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/e1c30c585b2846acad712328a01b5b23 2024-12-10T15:37:06,725 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/0456cc7a48fb40bd9a0f0133a35bca28 is 50, key is test_row_0/C:col10/1733845026198/Put/seqid=0 2024-12-10T15:37:06,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742131_1307 (size=12301) 2024-12-10T15:37:06,747 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/0456cc7a48fb40bd9a0f0133a35bca28 2024-12-10T15:37:06,751 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/53d8faa8b3e5438186328f9ae6aa7755 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/53d8faa8b3e5438186328f9ae6aa7755 2024-12-10T15:37:06,768 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/53d8faa8b3e5438186328f9ae6aa7755, entries=150, sequenceid=326, filesize=12.0 K 2024-12-10T15:37:06,769 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/e1c30c585b2846acad712328a01b5b23 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/e1c30c585b2846acad712328a01b5b23 2024-12-10T15:37:06,773 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/e1c30c585b2846acad712328a01b5b23, entries=150, sequenceid=326, filesize=12.0 K 2024-12-10T15:37:06,774 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/0456cc7a48fb40bd9a0f0133a35bca28 as 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/0456cc7a48fb40bd9a0f0133a35bca28 2024-12-10T15:37:06,779 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/0456cc7a48fb40bd9a0f0133a35bca28, entries=150, sequenceid=326, filesize=12.0 K 2024-12-10T15:37:06,784 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 614727b67ed1c48d9acfd143d8b127a7 in 582ms, sequenceid=326, compaction requested=true 2024-12-10T15:37:06,784 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:06,784 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:37:06,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:37:06,785 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:37:06,785 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/A is initiating minor compaction (all files) 2024-12-10T15:37:06,786 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/A in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
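The ExploringCompactionPolicy entry above ("selected 3 files of size 37585 ... with 1 in ratio") refers to the size-ratio test used when picking store files for a minor compaction: a candidate set is "in ratio" when no single file is larger than the sum of the remaining files multiplied by the configured ratio. The sketch below is a simplified, self-contained illustration of that test, not the HBase source; the file sizes only roughly match the three A-store files in the log, and the 1.2 ratio is the assumed default of hbase.hstore.compaction.ratio.

```java
import java.util.List;

// Simplified illustration of the "in ratio" check reported by the
// ExploringCompactionPolicy log line above. NOT the HBase implementation.
public class RatioCheckSketch {

    // A candidate set is "in ratio" when every file is no larger than
    // the combined size of the other files multiplied by the ratio.
    static boolean inRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Approximate sizes of the three A-store files being compacted
        // (12.7 K + 12.0 K + 12.0 K, about 37585 bytes in total).
        List<Long> candidate = List.of(13000L, 12300L, 12285L);
        double assumedRatio = 1.2; // assumed default compaction ratio
        System.out.println("in ratio: " + inRatio(candidate, assumedRatio));
    }
}
```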
2024-12-10T15:37:06,786 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/0c559606029c41ab8250acc80b246c34, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/4be977ac07af4777a7d303997903550f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/53d8faa8b3e5438186328f9ae6aa7755] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=36.7 K 2024-12-10T15:37:06,787 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0c559606029c41ab8250acc80b246c34, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1733845024398 2024-12-10T15:37:06,787 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4be977ac07af4777a7d303997903550f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1733845024427 2024-12-10T15:37:06,788 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 53d8faa8b3e5438186328f9ae6aa7755, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1733845025055 2024-12-10T15:37:06,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:06,795 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:37:06,796 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49818 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:37:06,796 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/B is initiating minor compaction (all files) 2024-12-10T15:37:06,796 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/B in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
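The flush that produced these store files was driven by the master's FlushTableProcedure (pid=75), dispatched to the region server as a FlushRegionCallable (pid=76); the compactions are then queued by the flusher. From a client, an equivalent request goes through the Admin API. The sketch below is illustrative only and not part of the test; it assumes a reachable cluster configuration on the classpath and reuses the TestAcidGuarantees table name from the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Illustrative sketch: ask the master to flush and then compact a table,
// roughly what the FlushTableProcedure / compaction queue in the log does.
public class FlushAndCompactSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml is available
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");

            // Flush all memstores of the table; on the region server this ends up
            // in the same DefaultStoreFlusher path seen in the log.
            admin.flush(table);

            // Request a compaction; the server-side policy still decides
            // which store files are actually merged.
            admin.compact(table);
        }
    }
}
```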
2024-12-10T15:37:06,796 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/42530832dbcc4d9a87d8297bca32d6f8, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/1738f86653fa4aa0a20da42b41bf23bf, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/4d30c3dcc02244149e523e630681674a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/e1c30c585b2846acad712328a01b5b23] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=48.7 K 2024-12-10T15:37:06,797 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 42530832dbcc4d9a87d8297bca32d6f8, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1733845022261 2024-12-10T15:37:06,797 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 1738f86653fa4aa0a20da42b41bf23bf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1733845024398 2024-12-10T15:37:06,797 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 4d30c3dcc02244149e523e630681674a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1733845024427 2024-12-10T15:37:06,798 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting e1c30c585b2846acad712328a01b5b23, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1733845025055 2024-12-10T15:37:06,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:37:06,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:06,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:37:06,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:06,817 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#A#compaction#257 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:06,817 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/d3024a1315e942a4bd3c28663404b80c is 50, key is test_row_0/A:col10/1733845026198/Put/seqid=0 2024-12-10T15:37:06,819 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#B#compaction#258 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:06,819 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/8f601810acf04e8290b2847a07e9032d is 50, key is test_row_0/B:col10/1733845026198/Put/seqid=0 2024-12-10T15:37:06,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742132_1308 (size=13085) 2024-12-10T15:37:06,871 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:06,872 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-10T15:37:06,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
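From this point on the log fills with "RegionTooBusyException: Over memstore limit=512.0 K" warnings: while the flush is still running, the region blocks new writes once its memstore exceeds the blocking threshold (typically the configured flush size times hbase.hregion.memstore.block.multiplier; the 512.0 K figure here reflects the small flush size used by the test). The stock HBase client retries this internally and may surface it wrapped in a retries-exhausted exception, so the sketch below is only an illustration of reacting to that server-side signal with explicit backoff; the retry count and sleep times are arbitrary choices, and the table/row names are taken from the log.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative sketch: back off and retry a put when the region rejects
// writes with RegionTooBusyException ("Over memstore limit"), as seen
// repeatedly in the log that follows. Not a drop-in pattern: depending on
// client retry settings the exception may arrive wrapped differently.
public class BusyRegionRetrySketch {

    static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
        long sleepMs = 100;
        for (int attempt = 1; attempt <= 5; attempt++) {
            try {
                table.put(put);
                return;
            } catch (RegionTooBusyException busy) {
                if (attempt == 5) {
                    throw busy; // give up after the last attempt
                }
                Thread.sleep(sleepMs); // let the in-flight flush catch up
                sleepMs *= 2;
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            putWithBackoff(table, put);
        }
    }
}
```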
2024-12-10T15:37:06,872 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-10T15:37:06,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A 2024-12-10T15:37:06,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:06,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B 2024-12-10T15:37:06,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:06,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=C 2024-12-10T15:37:06,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:06,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:37:06,877 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:06,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742133_1309 (size=13051) 2024-12-10T15:37:06,894 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/8f601810acf04e8290b2847a07e9032d as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/8f601810acf04e8290b2847a07e9032d 2024-12-10T15:37:06,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/132dee1b1f854ea58f34e4027009bb05 is 50, key is test_row_0/A:col10/1733845026258/Put/seqid=0 2024-12-10T15:37:06,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:06,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845086891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:06,898 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:06,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845086893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:06,899 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:06,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845086894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:06,900 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:06,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845086894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:06,903 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/B of 614727b67ed1c48d9acfd143d8b127a7 into 8f601810acf04e8290b2847a07e9032d(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:37:06,903 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:06,903 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/B, priority=12, startTime=1733845026795; duration=0sec 2024-12-10T15:37:06,903 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:06,903 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:B 2024-12-10T15:37:06,903 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:37:06,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742134_1310 (size=12301) 2024-12-10T15:37:06,907 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/132dee1b1f854ea58f34e4027009bb05 2024-12-10T15:37:06,908 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:37:06,908 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/C is initiating minor compaction (all files) 2024-12-10T15:37:06,908 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] 
regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/C in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:06,908 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/116febc7ff1f4f4498e9975d83f6888e, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/2bdc0d51d6184b578c5fcbd06c169d14, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/0456cc7a48fb40bd9a0f0133a35bca28] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=36.7 K 2024-12-10T15:37:06,908 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 116febc7ff1f4f4498e9975d83f6888e, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1733845024398 2024-12-10T15:37:06,908 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 2bdc0d51d6184b578c5fcbd06c169d14, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1733845024427 2024-12-10T15:37:06,909 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 0456cc7a48fb40bd9a0f0133a35bca28, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1733845025055 2024-12-10T15:37:06,937 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#C#compaction#260 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:06,938 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/5e2be05aac8e4e7db135e36dc8078bca is 50, key is test_row_0/C:col10/1733845026198/Put/seqid=0 2024-12-10T15:37:06,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/d7549b8348b24bc5837fcd830562c109 is 50, key is test_row_0/B:col10/1733845026258/Put/seqid=0 2024-12-10T15:37:06,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742135_1311 (size=13085) 2024-12-10T15:37:07,002 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:07,002 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:07,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845086999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:07,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845086999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:07,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-10T15:37:07,005 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:07,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845087003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:07,006 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:07,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845087003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:07,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742136_1312 (size=12301) 2024-12-10T15:37:07,208 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:07,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845087207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:07,209 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:07,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845087207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:07,209 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:07,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845087208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:07,209 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:07,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845087208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:07,274 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/d3024a1315e942a4bd3c28663404b80c as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/d3024a1315e942a4bd3c28663404b80c 2024-12-10T15:37:07,278 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/A of 614727b67ed1c48d9acfd143d8b127a7 into d3024a1315e942a4bd3c28663404b80c(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:37:07,278 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:07,279 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/A, priority=13, startTime=1733845026784; duration=0sec 2024-12-10T15:37:07,279 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:07,279 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:A 2024-12-10T15:37:07,392 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/5e2be05aac8e4e7db135e36dc8078bca as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/5e2be05aac8e4e7db135e36dc8078bca 2024-12-10T15:37:07,396 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/C of 614727b67ed1c48d9acfd143d8b127a7 into 5e2be05aac8e4e7db135e36dc8078bca(size=12.8 K), total size for store is 12.8 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:37:07,396 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:07,396 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/C, priority=13, startTime=1733845026808; duration=0sec 2024-12-10T15:37:07,396 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:07,396 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:C 2024-12-10T15:37:07,412 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/d7549b8348b24bc5837fcd830562c109 2024-12-10T15:37:07,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/bbed9d4f347345dc9651c33e04a584ac is 50, key is test_row_0/C:col10/1733845026258/Put/seqid=0 2024-12-10T15:37:07,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742137_1313 (size=12301) 2024-12-10T15:37:07,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-10T15:37:07,512 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:07,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845087511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:07,512 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:07,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845087511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:07,513 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:07,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845087512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:07,514 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:07,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845087512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:07,859 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/bbed9d4f347345dc9651c33e04a584ac 2024-12-10T15:37:07,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/132dee1b1f854ea58f34e4027009bb05 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/132dee1b1f854ea58f34e4027009bb05 2024-12-10T15:37:07,867 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/132dee1b1f854ea58f34e4027009bb05, entries=150, sequenceid=351, filesize=12.0 K 2024-12-10T15:37:07,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/d7549b8348b24bc5837fcd830562c109 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/d7549b8348b24bc5837fcd830562c109 2024-12-10T15:37:07,872 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/d7549b8348b24bc5837fcd830562c109, entries=150, sequenceid=351, filesize=12.0 K 2024-12-10T15:37:07,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/bbed9d4f347345dc9651c33e04a584ac as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/bbed9d4f347345dc9651c33e04a584ac 2024-12-10T15:37:07,877 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/bbed9d4f347345dc9651c33e04a584ac, entries=150, sequenceid=351, filesize=12.0 K 2024-12-10T15:37:07,879 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 614727b67ed1c48d9acfd143d8b127a7 in 1007ms, sequenceid=351, compaction requested=false 2024-12-10T15:37:07,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:07,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:07,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-12-10T15:37:07,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-12-10T15:37:07,881 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-12-10T15:37:07,881 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4880 sec 2024-12-10T15:37:07,882 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 1.4950 sec 2024-12-10T15:37:08,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:37:08,019 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-10T15:37:08,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A 2024-12-10T15:37:08,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:08,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B 2024-12-10T15:37:08,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:08,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
614727b67ed1c48d9acfd143d8b127a7, store=C 2024-12-10T15:37:08,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:08,025 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/e782899e1b8b41c6b800d47b815d0df4 is 50, key is test_row_0/A:col10/1733845028018/Put/seqid=0 2024-12-10T15:37:08,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742138_1314 (size=12301) 2024-12-10T15:37:08,043 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=367 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/e782899e1b8b41c6b800d47b815d0df4 2024-12-10T15:37:08,065 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/156d697297d24cd6a4c109e7d94bb7c3 is 50, key is test_row_0/B:col10/1733845028018/Put/seqid=0 2024-12-10T15:37:08,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:08,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845088074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:08,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:08,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845088077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:08,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:08,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845088077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:08,082 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:08,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845088078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:08,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742139_1315 (size=12301) 2024-12-10T15:37:08,113 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=367 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/156d697297d24cd6a4c109e7d94bb7c3 2024-12-10T15:37:08,137 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/a77f0fc85d89451a9b16c2cc271d1b7b is 50, key is test_row_0/C:col10/1733845028018/Put/seqid=0 2024-12-10T15:37:08,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742140_1316 (size=12301) 2024-12-10T15:37:08,160 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=367 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/a77f0fc85d89451a9b16c2cc271d1b7b 2024-12-10T15:37:08,171 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/e782899e1b8b41c6b800d47b815d0df4 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/e782899e1b8b41c6b800d47b815d0df4 2024-12-10T15:37:08,188 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:08,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845088183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:08,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:08,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845088183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:08,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:08,189 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:08,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845088183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:08,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845088183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:08,193 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/e782899e1b8b41c6b800d47b815d0df4, entries=150, sequenceid=367, filesize=12.0 K 2024-12-10T15:37:08,194 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/156d697297d24cd6a4c109e7d94bb7c3 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/156d697297d24cd6a4c109e7d94bb7c3 2024-12-10T15:37:08,197 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/156d697297d24cd6a4c109e7d94bb7c3, entries=150, sequenceid=367, filesize=12.0 K 2024-12-10T15:37:08,198 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/a77f0fc85d89451a9b16c2cc271d1b7b as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/a77f0fc85d89451a9b16c2cc271d1b7b 2024-12-10T15:37:08,201 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/a77f0fc85d89451a9b16c2cc271d1b7b, entries=150, sequenceid=367, filesize=12.0 K 2024-12-10T15:37:08,202 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 614727b67ed1c48d9acfd143d8b127a7 in 183ms, 
sequenceid=367, compaction requested=true 2024-12-10T15:37:08,202 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:08,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:37:08,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:08,202 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:37:08,202 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:37:08,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:37:08,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:08,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:37:08,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:08,203 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:37:08,203 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/A is initiating minor compaction (all files) 2024-12-10T15:37:08,203 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/A in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
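
Editor's note on the repeated RegionTooBusyException entries above: the region server is rejecting Mutate calls while the memstore of region 614727b67ed1c48d9acfd143d8b127a7 is over its 512.0 K blocking limit and a flush is still in flight; once the flush completes, writes are accepted again. The standard HBase client already retries these rejections internally, so the sketch below is only an illustration of what an explicit client-side backoff would look like. The class name BackoffPutExample, the helper putWithBackoff, and the retry limits are assumptions for illustration, not part of the test; the table, row, and column names are taken from the log.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutExample {
        // Hypothetical helper: retry a single Put with exponential backoff when the
        // region rejects writes because its memstore is over the blocking limit.
        static void putWithBackoff(Table table, Put put, int maxAttempts)
                throws IOException, InterruptedException {
            long sleepMs = 100;
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);          // server may throw RegionTooBusyException while a flush is pending
                    return;
                } catch (RegionTooBusyException e) {
                    if (attempt >= maxAttempts) {
                        throw e;             // give up after maxAttempts rejections
                    }
                    Thread.sleep(sleepMs);   // back off so the region server can finish flushing the memstore
                    sleepMs = Math.min(sleepMs * 2, 5000);
                }
            }
        }

        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                putWithBackoff(table, put, 10);
            }
        }
    }
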
2024-12-10T15:37:08,203 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/d3024a1315e942a4bd3c28663404b80c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/132dee1b1f854ea58f34e4027009bb05, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/e782899e1b8b41c6b800d47b815d0df4] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=36.8 K 2024-12-10T15:37:08,203 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:37:08,203 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/B is initiating minor compaction (all files) 2024-12-10T15:37:08,203 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/B in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:08,203 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/8f601810acf04e8290b2847a07e9032d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/d7549b8348b24bc5837fcd830562c109, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/156d697297d24cd6a4c109e7d94bb7c3] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=36.8 K 2024-12-10T15:37:08,204 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 8f601810acf04e8290b2847a07e9032d, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1733845025055 2024-12-10T15:37:08,204 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting d3024a1315e942a4bd3c28663404b80c, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1733845025055 2024-12-10T15:37:08,204 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting d7549b8348b24bc5837fcd830562c109, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1733845026240 2024-12-10T15:37:08,205 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 132dee1b1f854ea58f34e4027009bb05, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1733845026240 2024-12-10T15:37:08,205 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 156d697297d24cd6a4c109e7d94bb7c3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1733845026886 2024-12-10T15:37:08,206 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting e782899e1b8b41c6b800d47b815d0df4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1733845026886 2024-12-10T15:37:08,217 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#B#compaction#266 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:08,218 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/c60f6dcfd4b0465190209fd5f2725168 is 50, key is test_row_0/B:col10/1733845028018/Put/seqid=0 2024-12-10T15:37:08,233 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#A#compaction#267 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:08,233 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/dca428075704477ca5963e958a177f7b is 50, key is test_row_0/A:col10/1733845028018/Put/seqid=0 2024-12-10T15:37:08,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742142_1318 (size=13187) 2024-12-10T15:37:08,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742141_1317 (size=13153) 2024-12-10T15:37:08,290 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/c60f6dcfd4b0465190209fd5f2725168 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/c60f6dcfd4b0465190209fd5f2725168 2024-12-10T15:37:08,295 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/dca428075704477ca5963e958a177f7b as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/dca428075704477ca5963e958a177f7b 2024-12-10T15:37:08,305 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/B of 614727b67ed1c48d9acfd143d8b127a7 into c60f6dcfd4b0465190209fd5f2725168(size=12.8 K), total size for store is 12.8 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:37:08,306 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:08,306 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/B, priority=13, startTime=1733845028202; duration=0sec 2024-12-10T15:37:08,307 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:08,307 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:B 2024-12-10T15:37:08,307 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:37:08,312 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:37:08,312 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/C is initiating minor compaction (all files) 2024-12-10T15:37:08,312 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/C in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:08,312 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/5e2be05aac8e4e7db135e36dc8078bca, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/bbed9d4f347345dc9651c33e04a584ac, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/a77f0fc85d89451a9b16c2cc271d1b7b] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=36.8 K 2024-12-10T15:37:08,314 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e2be05aac8e4e7db135e36dc8078bca, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1733845025055 2024-12-10T15:37:08,319 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting bbed9d4f347345dc9651c33e04a584ac, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1733845026240 2024-12-10T15:37:08,321 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/A of 614727b67ed1c48d9acfd143d8b127a7 into dca428075704477ca5963e958a177f7b(size=12.9 K), total size for store is 12.9 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:37:08,321 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:08,321 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/A, priority=13, startTime=1733845028202; duration=0sec 2024-12-10T15:37:08,321 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:08,321 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:A 2024-12-10T15:37:08,323 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting a77f0fc85d89451a9b16c2cc271d1b7b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1733845026886 2024-12-10T15:37:08,380 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#C#compaction#268 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:08,380 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/b1db8eb97929460a9a7baf148661fcd9 is 50, key is test_row_0/C:col10/1733845028018/Put/seqid=0 2024-12-10T15:37:08,396 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-10T15:37:08,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A 2024-12-10T15:37:08,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:37:08,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:08,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B 2024-12-10T15:37:08,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:08,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=C 2024-12-10T15:37:08,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:08,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742143_1319 (size=13187) 2024-12-10T15:37:08,457 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/2be23c52f5524385806e9a4498d2a26a is 50, key is test_row_0/A:col10/1733845028394/Put/seqid=0 2024-12-10T15:37:08,464 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:08,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845088462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:08,466 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:08,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845088464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:08,468 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:08,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845088467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:08,469 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:08,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845088468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:08,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-10T15:37:08,508 INFO [Thread-1117 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-12-10T15:37:08,512 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:37:08,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-12-10T15:37:08,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-10T15:37:08,519 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:37:08,523 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:37:08,523 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:37:08,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742144_1320 (size=12301) 2024-12-10T15:37:08,544 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=395 (bloomFilter=true), 
to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/2be23c52f5524385806e9a4498d2a26a 2024-12-10T15:37:08,561 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/b6c18ab0c88148cda5c5ff6e13dbb90b is 50, key is test_row_0/B:col10/1733845028394/Put/seqid=0 2024-12-10T15:37:08,574 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:08,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845088572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:08,575 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:08,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845088572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:08,575 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:08,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845088572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:08,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:08,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845088572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:08,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-10T15:37:08,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742145_1321 (size=12301) 2024-12-10T15:37:08,624 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/b6c18ab0c88148cda5c5ff6e13dbb90b 2024-12-10T15:37:08,642 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/a7d3f76f49944758870921edc5be295c is 50, key is test_row_0/C:col10/1733845028394/Put/seqid=0 2024-12-10T15:37:08,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742146_1322 (size=12301) 2024-12-10T15:37:08,682 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:08,684 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/a7d3f76f49944758870921edc5be295c 2024-12-10T15:37:08,687 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-10T15:37:08,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:08,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:08,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:08,691 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:08,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
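The repeated RegionTooBusyException entries above come from HRegion.checkResources(): once a region's memstore grows past its blocking size, new mutations are rejected until the in-flight flush catches up. The 512.0 K limit in this log is a deliberately small test setting; in a normal deployment the blocking size is the product of two standard configuration keys. A minimal sketch of that arithmetic, using the usual defaults rather than anything taken from this run (the keys themselves belong in the region server's hbase-site.xml):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    // Set on a client Configuration here only to illustrate the arithmetic.
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024); // per-region flush threshold
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);             // blocking multiplier
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
    // Writes are rejected with RegionTooBusyException once the memstore reaches this size.
    System.out.println("memstore blocking limit = " + blockingLimit + " bytes");
  }
}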
2024-12-10T15:37:08,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:08,701 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/2be23c52f5524385806e9a4498d2a26a as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/2be23c52f5524385806e9a4498d2a26a 2024-12-10T15:37:08,706 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/2be23c52f5524385806e9a4498d2a26a, entries=150, sequenceid=395, filesize=12.0 K 2024-12-10T15:37:08,707 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/b6c18ab0c88148cda5c5ff6e13dbb90b as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/b6c18ab0c88148cda5c5ff6e13dbb90b 2024-12-10T15:37:08,719 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/b6c18ab0c88148cda5c5ff6e13dbb90b, entries=150, sequenceid=395, filesize=12.0 K 2024-12-10T15:37:08,720 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/a7d3f76f49944758870921edc5be295c as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/a7d3f76f49944758870921edc5be295c 2024-12-10T15:37:08,724 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/a7d3f76f49944758870921edc5be295c, entries=150, sequenceid=395, filesize=12.0 K 2024-12-10T15:37:08,725 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 614727b67ed1c48d9acfd143d8b127a7 in 329ms, sequenceid=395, compaction requested=false 2024-12-10T15:37:08,725 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:08,779 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-10T15:37:08,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A 2024-12-10T15:37:08,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:08,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B 2024-12-10T15:37:08,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping 
pipeline suffix; before=1, new segment=null 2024-12-10T15:37:08,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=C 2024-12-10T15:37:08,779 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:08,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:37:08,785 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/8fd8c33ea12c4820826c68fbb94037f9 is 50, key is test_row_0/A:col10/1733845028456/Put/seqid=0 2024-12-10T15:37:08,803 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/b1db8eb97929460a9a7baf148661fcd9 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/b1db8eb97929460a9a7baf148661fcd9 2024-12-10T15:37:08,809 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/C of 614727b67ed1c48d9acfd143d8b127a7 into b1db8eb97929460a9a7baf148661fcd9(size=12.9 K), total size for store is 24.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:37:08,809 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:08,809 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/C, priority=13, startTime=1733845028202; duration=0sec 2024-12-10T15:37:08,809 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:08,809 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:C 2024-12-10T15:37:08,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-10T15:37:08,833 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:08,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845088827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:08,835 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:08,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845088829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:08,835 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:08,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845088832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:08,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742147_1323 (size=12301) 2024-12-10T15:37:08,843 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:08,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845088840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:08,851 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:08,852 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-10T15:37:08,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:08,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:08,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:08,852 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
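The Mutate calls being rejected in the surrounding entries are plain Table.put() requests from the test's writer threads (AcidGuaranteesTestTool$AtomicityWriter, visible in the client-side trace a few entries below); the HBase client retries RegionTooBusyException internally rather than surfacing it to the caller on the first rejection. A minimal sketch of that write path, assuming the TestAcidGuarantees table from this log and an illustrative cell value:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriterSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row, family, and qualifier match the keys seen in this log; the value is made up.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // While the region is over its memstore blocking limit, the server answers with
      // RegionTooBusyException and the client's retrying caller backs off and retries;
      // the call only fails here once the configured retry budget is exhausted.
      table.put(put);
    }
  }
}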
2024-12-10T15:37:08,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:08,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:08,937 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:08,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845088935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:08,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:08,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845088936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:08,942 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:08,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845088939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:08,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:08,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845088945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:08,977 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:08,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58222 deadline: 1733845088975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:08,979 DEBUG [Thread-1107 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8204 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., hostname=bf0fec90ff6d,46239,1733844953049, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T15:37:09,008 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:09,008 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-10T15:37:09,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
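The RpcRetryingCallerImpl entry above (tries=7, retries=16, started=8204 ms ago) is the client-side half of the same congestion: each rejected put is retried with an increasing pause until the region unblocks or the retry budget runs out. That budget is governed by standard client keys; a sketch with illustrative values comparable to the retries=16 reported in this run (defaults vary by release):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RetryTuning {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 16);        // retry attempts per operation
    conf.setLong("hbase.client.pause", 100);               // base pause between retries, ms
    conf.setLong("hbase.client.operation.timeout", 60000); // overall per-operation cap, ms
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Tables and admins obtained from this connection inherit the retry settings above.
    }
  }
}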
2024-12-10T15:37:09,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:09,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:09,009 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:09,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:09,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:09,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-10T15:37:09,145 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:09,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845089143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:09,146 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:09,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845089144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:09,152 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:09,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845089147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:09,156 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:09,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845089151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:09,160 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:09,160 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-10T15:37:09,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:09,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:09,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:09,163 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
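The pid=78 failures repeating through this section are not data errors: FlushRegionCallable refuses to start a second flush while one is already running ("NOT flushing ... as already flushing"), reports the IOException back, and the master re-dispatches the callable until an attempt succeeds. The same table flush can be requested administratively; a minimal sketch, assuming (not confirmed by this log alone) that Admin.flush goes through the same master-coordinated flush path seen here as pid=77/78 on this 2.7.0-SNAPSHOT build:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlush {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the cluster to flush every region of the table; a region that is already
      // flushing causes a retried attempt rather than a second concurrent flush.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}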
2024-12-10T15:37:09,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:09,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:09,241 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=407 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/8fd8c33ea12c4820826c68fbb94037f9 2024-12-10T15:37:09,261 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/21558744c2ad4c5da3424c53c3a6366c is 50, key is test_row_0/B:col10/1733845028456/Put/seqid=0 2024-12-10T15:37:09,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742148_1324 (size=12301) 2024-12-10T15:37:09,280 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=407 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/21558744c2ad4c5da3424c53c3a6366c 2024-12-10T15:37:09,305 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/447d0e6e60d3486dbc0a6239e72dfdb3 is 50, key is test_row_0/C:col10/1733845028456/Put/seqid=0 2024-12-10T15:37:09,323 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:09,327 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-10T15:37:09,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:09,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:09,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:09,330 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:09,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:09,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:09,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742149_1325 (size=12301) 2024-12-10T15:37:09,338 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=407 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/447d0e6e60d3486dbc0a6239e72dfdb3 2024-12-10T15:37:09,343 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/8fd8c33ea12c4820826c68fbb94037f9 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/8fd8c33ea12c4820826c68fbb94037f9 2024-12-10T15:37:09,356 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/8fd8c33ea12c4820826c68fbb94037f9, entries=150, sequenceid=407, filesize=12.0 K 2024-12-10T15:37:09,357 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/21558744c2ad4c5da3424c53c3a6366c as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/21558744c2ad4c5da3424c53c3a6366c 2024-12-10T15:37:09,363 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/21558744c2ad4c5da3424c53c3a6366c, entries=150, sequenceid=407, filesize=12.0 K 2024-12-10T15:37:09,368 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/447d0e6e60d3486dbc0a6239e72dfdb3 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/447d0e6e60d3486dbc0a6239e72dfdb3 2024-12-10T15:37:09,411 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/447d0e6e60d3486dbc0a6239e72dfdb3, entries=150, sequenceid=407, filesize=12.0 K 2024-12-10T15:37:09,412 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 614727b67ed1c48d9acfd143d8b127a7 in 634ms, sequenceid=407, compaction requested=true 2024-12-10T15:37:09,412 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:09,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:37:09,413 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:37:09,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:09,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:37:09,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:09,413 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:37:09,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:37:09,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:09,431 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:37:09,432 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/B is initiating minor 
compaction (all files) 2024-12-10T15:37:09,432 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/B in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:09,432 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/c60f6dcfd4b0465190209fd5f2725168, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/b6c18ab0c88148cda5c5ff6e13dbb90b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/21558744c2ad4c5da3424c53c3a6366c] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=36.9 K 2024-12-10T15:37:09,432 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:37:09,432 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/A is initiating minor compaction (all files) 2024-12-10T15:37:09,432 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/A in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
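The selection lines above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking", followed by the ExploringCompactionPolicy choice) are driven by the store's compaction settings. A rough sketch of the knobs involved is below, using what I believe are the stock defaults; treat the exact values as assumptions and check hbase-default.xml for the running version:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionKnobs {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum eligible files before a minor compaction is considered; the three flush
        // outputs per store in the log are exactly enough to trigger one.
        conf.setInt("hbase.hstore.compaction.min", 3);
        // Upper bound on how many files one compaction may combine.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Store-file count at which further flushes are blocked; matches "16 blocking" above.
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println("minor compaction needs >= "
                + conf.getInt("hbase.hstore.compaction.min", 3) + " files");
    }
}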
2024-12-10T15:37:09,432 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/dca428075704477ca5963e958a177f7b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/2be23c52f5524385806e9a4498d2a26a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/8fd8c33ea12c4820826c68fbb94037f9] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=36.9 K 2024-12-10T15:37:09,435 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting c60f6dcfd4b0465190209fd5f2725168, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1733845026886 2024-12-10T15:37:09,436 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting dca428075704477ca5963e958a177f7b, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1733845026886 2024-12-10T15:37:09,439 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting b6c18ab0c88148cda5c5ff6e13dbb90b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1733845028394 2024-12-10T15:37:09,440 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2be23c52f5524385806e9a4498d2a26a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1733845028394 2024-12-10T15:37:09,443 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8fd8c33ea12c4820826c68fbb94037f9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1733845028456 2024-12-10T15:37:09,444 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 21558744c2ad4c5da3424c53c3a6366c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1733845028456 2024-12-10T15:37:09,456 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-10T15:37:09,456 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A 2024-12-10T15:37:09,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:09,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B 2024-12-10T15:37:09,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:09,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=C 2024-12-10T15:37:09,457 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:09,457 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:37:09,468 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/db8de1758b1e4d89ba3fe1905a075c17 is 50, key is test_row_0/A:col10/1733845028828/Put/seqid=0 2024-12-10T15:37:09,479 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#B#compaction#276 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:09,480 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/62217a231143457f9d529b2d92003fb7 is 50, key is test_row_0/B:col10/1733845028456/Put/seqid=0 2024-12-10T15:37:09,486 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:09,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845089481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:09,488 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:09,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845089481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:09,490 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:09,490 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-10T15:37:09,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:09,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:09,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:09,490 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:09,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:09,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:09,491 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:09,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845089484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:09,495 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#A#compaction#277 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:09,495 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:09,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845089482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:09,495 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/72b8e805e4074079ae5d4830002b7744 is 50, key is test_row_0/A:col10/1733845028456/Put/seqid=0 2024-12-10T15:37:09,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742150_1326 (size=14741) 2024-12-10T15:37:09,534 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/db8de1758b1e4d89ba3fe1905a075c17 2024-12-10T15:37:09,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742151_1327 (size=13255) 2024-12-10T15:37:09,548 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/62217a231143457f9d529b2d92003fb7 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/62217a231143457f9d529b2d92003fb7 2024-12-10T15:37:09,556 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/B of 614727b67ed1c48d9acfd143d8b127a7 into 62217a231143457f9d529b2d92003fb7(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
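The same client connections (172.17.0.2:58220, :58280, :58264) keep reappearing with new callIds after each RegionTooBusyException, which is consistent with the writers backing off and retrying blocked puts. A hand-rolled version of that pattern is sketched below; the table, row, family, and qualifier names mirror the test schema seen in the log, but the retry loop itself is an assumption, since the real HBase client performs its own retries internally.

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryBlockedPut {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    break; // accepted once the memstore has drained below the limit
                } catch (IOException e) {
                    // Depending on client retry settings, the RegionTooBusyException may arrive
                    // directly or wrapped in another IOException; treat either case as "busy".
                    boolean busy = e instanceof RegionTooBusyException
                            || e.getCause() instanceof RegionTooBusyException;
                    if (!busy || attempt >= 5) {
                        throw e;
                    }
                    Thread.sleep(100L * attempt); // simple linear back-off
                }
            }
        }
    }
}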
2024-12-10T15:37:09,556 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:09,556 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/B, priority=13, startTime=1733845029413; duration=0sec 2024-12-10T15:37:09,556 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:09,556 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:B 2024-12-10T15:37:09,556 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:37:09,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742152_1328 (size=13289) 2024-12-10T15:37:09,558 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:37:09,558 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/C is initiating minor compaction (all files) 2024-12-10T15:37:09,558 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/C in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
2024-12-10T15:37:09,558 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/b1db8eb97929460a9a7baf148661fcd9, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/a7d3f76f49944758870921edc5be295c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/447d0e6e60d3486dbc0a6239e72dfdb3] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=36.9 K 2024-12-10T15:37:09,559 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting b1db8eb97929460a9a7baf148661fcd9, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1733845026886 2024-12-10T15:37:09,559 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting a7d3f76f49944758870921edc5be295c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1733845028394 2024-12-10T15:37:09,560 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 447d0e6e60d3486dbc0a6239e72dfdb3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1733845028456 2024-12-10T15:37:09,566 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/72b8e805e4074079ae5d4830002b7744 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/72b8e805e4074079ae5d4830002b7744 2024-12-10T15:37:09,572 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/e3d6e27879b64ad88abf4ea0ecbc3f95 is 50, key is test_row_0/B:col10/1733845028828/Put/seqid=0 2024-12-10T15:37:09,573 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/A of 614727b67ed1c48d9acfd143d8b127a7 into 72b8e805e4074079ae5d4830002b7744(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:37:09,573 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:09,573 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/A, priority=13, startTime=1733845029412; duration=0sec 2024-12-10T15:37:09,573 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:09,573 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:A 2024-12-10T15:37:09,590 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#C#compaction#279 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:09,591 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/8aea3608beee47d2824694548bfdc634 is 50, key is test_row_0/C:col10/1733845028456/Put/seqid=0 2024-12-10T15:37:09,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:09,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845089592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:09,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:09,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845089592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:09,597 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:09,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845089595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:09,598 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:09,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845089596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:09,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-10T15:37:09,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742153_1329 (size=12301) 2024-12-10T15:37:09,642 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:09,643 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-10T15:37:09,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:09,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:09,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:09,644 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:09,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:09,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:09,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742154_1330 (size=13289) 2024-12-10T15:37:09,795 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:09,798 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:09,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845089796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:09,799 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-10T15:37:09,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:09,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:09,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
2024-12-10T15:37:09,800 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:09,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:09,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:09,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:09,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845089799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:09,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:09,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845089799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:09,802 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:09,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845089800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:09,955 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:09,959 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-10T15:37:09,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:09,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:09,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:09,960 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:09,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:09,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:10,038 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/e3d6e27879b64ad88abf4ea0ecbc3f95 2024-12-10T15:37:10,070 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/9d4ff4d22c034db28a30906b9c800c32 is 50, key is test_row_0/C:col10/1733845028828/Put/seqid=0 2024-12-10T15:37:10,088 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/8aea3608beee47d2824694548bfdc634 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/8aea3608beee47d2824694548bfdc634 2024-12-10T15:37:10,104 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:10,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845090100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:10,107 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:10,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845090102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:10,113 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:10,113 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-10T15:37:10,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
2024-12-10T15:37:10,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:10,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:10,113 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:10,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:10,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742155_1331 (size=12301) 2024-12-10T15:37:10,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:10,115 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=434 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/9d4ff4d22c034db28a30906b9c800c32 2024-12-10T15:37:10,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:10,116 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:10,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845090112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:10,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845090111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:10,120 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/C of 614727b67ed1c48d9acfd143d8b127a7 into 8aea3608beee47d2824694548bfdc634(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:37:10,120 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:10,120 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/C, priority=13, startTime=1733845029413; duration=0sec 2024-12-10T15:37:10,120 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:10,120 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:C 2024-12-10T15:37:10,135 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/db8de1758b1e4d89ba3fe1905a075c17 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/db8de1758b1e4d89ba3fe1905a075c17 2024-12-10T15:37:10,153 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/db8de1758b1e4d89ba3fe1905a075c17, entries=200, sequenceid=434, filesize=14.4 K 2024-12-10T15:37:10,154 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/e3d6e27879b64ad88abf4ea0ecbc3f95 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/e3d6e27879b64ad88abf4ea0ecbc3f95 2024-12-10T15:37:10,172 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/e3d6e27879b64ad88abf4ea0ecbc3f95, entries=150, sequenceid=434, filesize=12.0 K 2024-12-10T15:37:10,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/9d4ff4d22c034db28a30906b9c800c32 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/9d4ff4d22c034db28a30906b9c800c32 2024-12-10T15:37:10,196 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/9d4ff4d22c034db28a30906b9c800c32, entries=150, sequenceid=434, filesize=12.0 K 2024-12-10T15:37:10,197 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 614727b67ed1c48d9acfd143d8b127a7 in 740ms, sequenceid=434, compaction requested=false 2024-12-10T15:37:10,197 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:10,275 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:10,279 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-10T15:37:10,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
2024-12-10T15:37:10,280 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-10T15:37:10,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A 2024-12-10T15:37:10,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:10,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B 2024-12-10T15:37:10,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:10,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=C 2024-12-10T15:37:10,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:10,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/f4f91b8cc1bc410ea0e902a6f1ae9df9 is 50, key is test_row_0/A:col10/1733845029477/Put/seqid=0 2024-12-10T15:37:10,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742156_1332 (size=12301) 2024-12-10T15:37:10,344 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/f4f91b8cc1bc410ea0e902a6f1ae9df9 2024-12-10T15:37:10,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/1c887f89856648e180c767bfe367f39e is 50, key is test_row_0/B:col10/1733845029477/Put/seqid=0 2024-12-10T15:37:10,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742157_1333 (size=12301) 2024-12-10T15:37:10,399 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=447 (bloomFilter=true), 
to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/1c887f89856648e180c767bfe367f39e 2024-12-10T15:37:10,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/3fc7e8cecfd94a0ab622b6ddd9e1d1f5 is 50, key is test_row_0/C:col10/1733845029477/Put/seqid=0 2024-12-10T15:37:10,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742158_1334 (size=12301) 2024-12-10T15:37:10,492 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/3fc7e8cecfd94a0ab622b6ddd9e1d1f5 2024-12-10T15:37:10,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/f4f91b8cc1bc410ea0e902a6f1ae9df9 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/f4f91b8cc1bc410ea0e902a6f1ae9df9 2024-12-10T15:37:10,523 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/f4f91b8cc1bc410ea0e902a6f1ae9df9, entries=150, sequenceid=447, filesize=12.0 K 2024-12-10T15:37:10,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/1c887f89856648e180c767bfe367f39e as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/1c887f89856648e180c767bfe367f39e 2024-12-10T15:37:10,536 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/1c887f89856648e180c767bfe367f39e, entries=150, sequenceid=447, filesize=12.0 K 2024-12-10T15:37:10,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/3fc7e8cecfd94a0ab622b6ddd9e1d1f5 as 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/3fc7e8cecfd94a0ab622b6ddd9e1d1f5 2024-12-10T15:37:10,554 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/3fc7e8cecfd94a0ab622b6ddd9e1d1f5, entries=150, sequenceid=447, filesize=12.0 K 2024-12-10T15:37:10,555 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=0 B/0 for 614727b67ed1c48d9acfd143d8b127a7 in 276ms, sequenceid=447, compaction requested=true 2024-12-10T15:37:10,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:10,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:10,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-12-10T15:37:10,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-12-10T15:37:10,587 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-12-10T15:37:10,587 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0380 sec 2024-12-10T15:37:10,589 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 2.0760 sec 2024-12-10T15:37:10,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-10T15:37:10,617 INFO [Thread-1117 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-12-10T15:37:10,620 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:37:10,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-12-10T15:37:10,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-10T15:37:10,621 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:37:10,622 INFO [PEWorker-3 {}] 
procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:37:10,622 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:37:10,628 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T15:37:10,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A 2024-12-10T15:37:10,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:10,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B 2024-12-10T15:37:10,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:10,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=C 2024-12-10T15:37:10,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:10,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:37:10,644 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/db0a9d0f11ee49839b8ec03a537388b5 is 50, key is test_row_0/A:col10/1733845030627/Put/seqid=0 2024-12-10T15:37:10,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742159_1335 (size=14741) 2024-12-10T15:37:10,663 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=458 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/db0a9d0f11ee49839b8ec03a537388b5 2024-12-10T15:37:10,679 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/3715bd856c87422db4cb696f78bee49d is 50, key is test_row_0/B:col10/1733845030627/Put/seqid=0 2024-12-10T15:37:10,680 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:10,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845090672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:10,686 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:10,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845090680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:10,686 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:10,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845090680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:10,687 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:10,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845090680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:10,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742160_1336 (size=12301) 2024-12-10T15:37:10,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-10T15:37:10,728 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=458 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/3715bd856c87422db4cb696f78bee49d 2024-12-10T15:37:10,745 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/34bd2b40d5b2431a885dc1ca3df19384 is 50, key is test_row_0/C:col10/1733845030627/Put/seqid=0 2024-12-10T15:37:10,775 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:10,776 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-10T15:37:10,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:10,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:10,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
2024-12-10T15:37:10,777 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:10,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:10,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:10,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742161_1337 (size=12301) 2024-12-10T15:37:10,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:10,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845090781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:10,789 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=458 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/34bd2b40d5b2431a885dc1ca3df19384 2024-12-10T15:37:10,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:10,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845090788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:10,794 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:10,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845090789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:10,794 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:10,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845090789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:10,795 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/db0a9d0f11ee49839b8ec03a537388b5 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/db0a9d0f11ee49839b8ec03a537388b5 2024-12-10T15:37:10,799 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/db0a9d0f11ee49839b8ec03a537388b5, entries=200, sequenceid=458, filesize=14.4 K 2024-12-10T15:37:10,800 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/3715bd856c87422db4cb696f78bee49d as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/3715bd856c87422db4cb696f78bee49d 2024-12-10T15:37:10,811 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/3715bd856c87422db4cb696f78bee49d, entries=150, sequenceid=458, filesize=12.0 K 2024-12-10T15:37:10,813 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/34bd2b40d5b2431a885dc1ca3df19384 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/34bd2b40d5b2431a885dc1ca3df19384 2024-12-10T15:37:10,820 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/34bd2b40d5b2431a885dc1ca3df19384, entries=150, sequenceid=458, filesize=12.0 K 2024-12-10T15:37:10,823 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 
KB/158010 for 614727b67ed1c48d9acfd143d8b127a7 in 195ms, sequenceid=458, compaction requested=true 2024-12-10T15:37:10,823 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:10,824 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:37:10,826 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 55072 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:37:10,826 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/A is initiating minor compaction (all files) 2024-12-10T15:37:10,826 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/A in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:10,826 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/72b8e805e4074079ae5d4830002b7744, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/db8de1758b1e4d89ba3fe1905a075c17, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/f4f91b8cc1bc410ea0e902a6f1ae9df9, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/db0a9d0f11ee49839b8ec03a537388b5] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=53.8 K 2024-12-10T15:37:10,827 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 72b8e805e4074079ae5d4830002b7744, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1733845028456 2024-12-10T15:37:10,827 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting db8de1758b1e4d89ba3fe1905a075c17, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1733845028820 2024-12-10T15:37:10,828 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting f4f91b8cc1bc410ea0e902a6f1ae9df9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=447, earliestPutTs=1733845029477 2024-12-10T15:37:10,829 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting db0a9d0f11ee49839b8ec03a537388b5, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=458, earliestPutTs=1733845030619 2024-12-10T15:37:10,838 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:37:10,838 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: 
system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:10,839 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:37:10,841 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#A#compaction#287 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:10,843 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50158 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:37:10,843 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/B is initiating minor compaction (all files) 2024-12-10T15:37:10,843 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/B in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:10,843 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/62217a231143457f9d529b2d92003fb7, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/e3d6e27879b64ad88abf4ea0ecbc3f95, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/1c887f89856648e180c767bfe367f39e, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/3715bd856c87422db4cb696f78bee49d] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=49.0 K 2024-12-10T15:37:10,844 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 62217a231143457f9d529b2d92003fb7, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1733845028456 2024-12-10T15:37:10,844 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting e3d6e27879b64ad88abf4ea0ecbc3f95, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1733845028821 2024-12-10T15:37:10,844 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c887f89856648e180c767bfe367f39e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=447, earliestPutTs=1733845029477 2024-12-10T15:37:10,845 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 3715bd856c87422db4cb696f78bee49d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=458, earliestPutTs=1733845030619 2024-12-10T15:37:10,852 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/a7edfdc3a50e49d8b75cf1853ff2e471 is 50, key is test_row_0/A:col10/1733845030627/Put/seqid=0 2024-12-10T15:37:10,853 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:37:10,853 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:10,853 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:37:10,853 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:10,862 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#B#compaction#288 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:10,863 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/fc2ce3ab21314035976860c9e0118650 is 50, key is test_row_0/B:col10/1733845030627/Put/seqid=0 2024-12-10T15:37:10,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-10T15:37:10,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742163_1339 (size=13391) 2024-12-10T15:37:10,933 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:10,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742162_1338 (size=13425) 2024-12-10T15:37:10,939 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-10T15:37:10,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
2024-12-10T15:37:10,943 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-10T15:37:10,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A 2024-12-10T15:37:10,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:10,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B 2024-12-10T15:37:10,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:10,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=C 2024-12-10T15:37:10,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:10,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/1b886799c61f47fb9605ffa5d62ae9f7 is 50, key is test_row_0/A:col10/1733845030643/Put/seqid=0 2024-12-10T15:37:10,997 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:10,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:37:11,018 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:11,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845091011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:11,020 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:11,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845091013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:11,020 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:11,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845091015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:11,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:11,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845091018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:11,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742164_1340 (size=12301) 2024-12-10T15:37:11,051 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=484 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/1b886799c61f47fb9605ffa5d62ae9f7 2024-12-10T15:37:11,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/803df6a1bc4347abb98b2b77e817ee15 is 50, key is test_row_0/B:col10/1733845030643/Put/seqid=0 2024-12-10T15:37:11,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:11,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845091119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:11,126 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:11,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845091124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:11,126 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:11,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845091124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:11,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742165_1341 (size=12301) 2024-12-10T15:37:11,139 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=484 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/803df6a1bc4347abb98b2b77e817ee15 2024-12-10T15:37:11,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:11,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845091135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:11,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/11da7521b8f04ed6a54c524ba8f5537d is 50, key is test_row_0/C:col10/1733845030643/Put/seqid=0 2024-12-10T15:37:11,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742166_1342 (size=12301) 2024-12-10T15:37:11,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-10T15:37:11,327 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:11,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845091324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:11,330 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:11,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845091327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:11,331 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:11,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845091327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:11,342 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/fc2ce3ab21314035976860c9e0118650 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/fc2ce3ab21314035976860c9e0118650 2024-12-10T15:37:11,347 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/B of 614727b67ed1c48d9acfd143d8b127a7 into fc2ce3ab21314035976860c9e0118650(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:37:11,348 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:11,348 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/B, priority=12, startTime=1733845030838; duration=0sec 2024-12-10T15:37:11,348 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:11,348 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:B 2024-12-10T15:37:11,348 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:37:11,348 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:11,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845091342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:11,350 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50192 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:37:11,350 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/C is initiating minor compaction (all files) 2024-12-10T15:37:11,350 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/C in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
2024-12-10T15:37:11,350 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/8aea3608beee47d2824694548bfdc634, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/9d4ff4d22c034db28a30906b9c800c32, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/3fc7e8cecfd94a0ab622b6ddd9e1d1f5, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/34bd2b40d5b2431a885dc1ca3df19384] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=49.0 K 2024-12-10T15:37:11,351 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 8aea3608beee47d2824694548bfdc634, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1733845028456 2024-12-10T15:37:11,351 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 9d4ff4d22c034db28a30906b9c800c32, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=434, earliestPutTs=1733845028821 2024-12-10T15:37:11,352 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/a7edfdc3a50e49d8b75cf1853ff2e471 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/a7edfdc3a50e49d8b75cf1853ff2e471 2024-12-10T15:37:11,352 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 3fc7e8cecfd94a0ab622b6ddd9e1d1f5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=447, earliestPutTs=1733845029477 2024-12-10T15:37:11,352 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 34bd2b40d5b2431a885dc1ca3df19384, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=458, earliestPutTs=1733845030619 2024-12-10T15:37:11,357 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/A of 614727b67ed1c48d9acfd143d8b127a7 into a7edfdc3a50e49d8b75cf1853ff2e471(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:37:11,357 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:11,357 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/A, priority=12, startTime=1733845030823; duration=0sec 2024-12-10T15:37:11,357 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:11,357 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:A 2024-12-10T15:37:11,365 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#C#compaction#292 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:11,366 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/7593da9d52c84c269e62f6eaab2a31e6 is 50, key is test_row_0/C:col10/1733845030627/Put/seqid=0 2024-12-10T15:37:11,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742167_1343 (size=13425) 2024-12-10T15:37:11,395 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/7593da9d52c84c269e62f6eaab2a31e6 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/7593da9d52c84c269e62f6eaab2a31e6 2024-12-10T15:37:11,400 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/C of 614727b67ed1c48d9acfd143d8b127a7 into 7593da9d52c84c269e62f6eaab2a31e6(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:37:11,400 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:11,400 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/C, priority=12, startTime=1733845030853; duration=0sec 2024-12-10T15:37:11,400 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:11,401 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:C 2024-12-10T15:37:11,612 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=484 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/11da7521b8f04ed6a54c524ba8f5537d 2024-12-10T15:37:11,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/1b886799c61f47fb9605ffa5d62ae9f7 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/1b886799c61f47fb9605ffa5d62ae9f7 2024-12-10T15:37:11,622 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/1b886799c61f47fb9605ffa5d62ae9f7, entries=150, sequenceid=484, filesize=12.0 K 2024-12-10T15:37:11,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/803df6a1bc4347abb98b2b77e817ee15 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/803df6a1bc4347abb98b2b77e817ee15 2024-12-10T15:37:11,628 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/803df6a1bc4347abb98b2b77e817ee15, entries=150, sequenceid=484, filesize=12.0 K 2024-12-10T15:37:11,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/11da7521b8f04ed6a54c524ba8f5537d as 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/11da7521b8f04ed6a54c524ba8f5537d 2024-12-10T15:37:11,631 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:11,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845091628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:11,634 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/11da7521b8f04ed6a54c524ba8f5537d, entries=150, sequenceid=484, filesize=12.0 K 2024-12-10T15:37:11,635 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 614727b67ed1c48d9acfd143d8b127a7 in 691ms, sequenceid=484, compaction requested=false 2024-12-10T15:37:11,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:11,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
2024-12-10T15:37:11,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-10T15:37:11,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-12-10T15:37:11,640 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-12-10T15:37:11,640 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0150 sec 2024-12-10T15:37:11,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:37:11,642 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T15:37:11,644 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 1.0210 sec 2024-12-10T15:37:11,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A 2024-12-10T15:37:11,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:11,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B 2024-12-10T15:37:11,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:11,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=C 2024-12-10T15:37:11,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:11,670 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/a0dd8f9edb734ac9ae9eb7753b8ccbbc is 50, key is test_row_0/A:col10/1733845031635/Put/seqid=0 2024-12-10T15:37:11,684 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:11,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845091682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:11,685 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:11,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845091682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:11,688 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:11,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845091687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:11,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742168_1344 (size=12301) 2024-12-10T15:37:11,706 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=499 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/a0dd8f9edb734ac9ae9eb7753b8ccbbc 2024-12-10T15:37:11,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-10T15:37:11,724 INFO [Thread-1117 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-12-10T15:37:11,725 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/aeff1e7df2e840bd9d885284cf650680 is 50, key is test_row_0/B:col10/1733845031635/Put/seqid=0 2024-12-10T15:37:11,728 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:37:11,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-12-10T15:37:11,729 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:37:11,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-10T15:37:11,730 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=81, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:37:11,730 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:37:11,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742169_1345 (size=12301) 2024-12-10T15:37:11,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:11,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845091787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:11,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:11,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845091788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:11,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:11,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845091799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:11,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-10T15:37:11,885 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:11,887 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-10T15:37:11,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:11,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:11,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:11,891 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:11,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:11,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:12,001 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:12,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845091999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:12,002 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:12,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845091999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:12,005 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:12,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845092003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:12,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-10T15:37:12,044 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:12,045 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-10T15:37:12,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:12,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:12,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:12,045 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:12,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:12,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:12,149 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:12,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845092147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:12,156 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=499 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/aeff1e7df2e840bd9d885284cf650680 2024-12-10T15:37:12,164 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/b49aeacb823a4c45b97fc134f63b7d65 is 50, key is test_row_0/C:col10/1733845031635/Put/seqid=0 2024-12-10T15:37:12,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742170_1346 (size=12301) 2024-12-10T15:37:12,198 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:12,198 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-10T15:37:12,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting 
region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:12,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:12,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:12,199 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:12,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:12,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:12,304 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:12,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845092303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:12,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:12,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845092303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:12,307 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:12,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845092307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:12,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-10T15:37:12,351 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:12,351 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-10T15:37:12,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:12,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:12,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:12,353 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:12,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:12,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:12,505 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:12,505 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-10T15:37:12,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:12,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:12,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:12,506 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:12,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:12,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
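The Mutate calls being rejected with RegionTooBusyException in this stretch are ordinary client writes against the three column families A, B and C (the flushed cells are keyed test_row_0/A:col10 and so on). A minimal sketch of such a write, with connection details and the cell value assumed, might look like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriterSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // One cell per column family, mirroring the test_row_0/A:col10 keys in the log.
            Put put = new Put(Bytes.toBytes("test_row_0"));
            for (String family : new String[] {"A", "B", "C"}) {
                put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"),
                    Bytes.toBytes("value")); // the value written here is illustrative
            }
            // If the region is over its memstore blocking limit the server answers with
            // RegionTooBusyException, as above, and the client retries internally
            // (hbase.client.retries.number / hbase.client.pause).
            table.put(put);
        }
    }
}

The 512.0 K figure in the exception is the region's memstore blocking limit, i.e. the memstore flush size multiplied by the block multiplier (hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier); presumably the test configures it deliberately small so that writers outrun the flusher and exercise this path.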
2024-12-10T15:37:12,593 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=499 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/b49aeacb823a4c45b97fc134f63b7d65 2024-12-10T15:37:12,600 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/a0dd8f9edb734ac9ae9eb7753b8ccbbc as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/a0dd8f9edb734ac9ae9eb7753b8ccbbc 2024-12-10T15:37:12,606 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/a0dd8f9edb734ac9ae9eb7753b8ccbbc, entries=150, sequenceid=499, filesize=12.0 K 2024-12-10T15:37:12,607 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/aeff1e7df2e840bd9d885284cf650680 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/aeff1e7df2e840bd9d885284cf650680 2024-12-10T15:37:12,611 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/aeff1e7df2e840bd9d885284cf650680, entries=150, sequenceid=499, filesize=12.0 K 2024-12-10T15:37:12,611 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/b49aeacb823a4c45b97fc134f63b7d65 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/b49aeacb823a4c45b97fc134f63b7d65 2024-12-10T15:37:12,614 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/b49aeacb823a4c45b97fc134f63b7d65, entries=150, sequenceid=499, filesize=12.0 K 2024-12-10T15:37:12,616 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 614727b67ed1c48d9acfd143d8b127a7 in 974ms, sequenceid=499, compaction requested=true 2024-12-10T15:37:12,616 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:12,616 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:37:12,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:A, priority=-2147483648, 
current under compaction store size is 1 2024-12-10T15:37:12,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:12,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:37:12,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:12,617 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:37:12,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:37:12,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:12,617 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38027 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:37:12,617 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/A is initiating minor compaction (all files) 2024-12-10T15:37:12,618 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/A in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:12,618 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/a7edfdc3a50e49d8b75cf1853ff2e471, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/1b886799c61f47fb9605ffa5d62ae9f7, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/a0dd8f9edb734ac9ae9eb7753b8ccbbc] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=37.1 K 2024-12-10T15:37:12,618 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37993 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:37:12,618 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/B is initiating minor compaction (all files) 2024-12-10T15:37:12,618 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/B in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
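The selection entries above ("3 store files, 0 compacting, 3 eligible, 16 blocking") are driven by two store-level settings: the minimum number of eligible files before a minor compaction is chosen, and the store-file count at which further flushes are held back. A minimal sketch of where those knobs live, using the standard property names with their usual defaults (stated here as an assumption about this test's configuration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible store files before a minor compaction is selected;
        // with exactly three files per store, that threshold has just been reached.
        conf.setInt("hbase.hstore.compactionThreshold", 3);
        // Store-file count at which further memstore flushes are delayed; this is the
        // "16 blocking" figure printed by SortedCompactionPolicy.
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println("min files to compact = "
            + conf.getInt("hbase.hstore.compactionThreshold", 3)
            + ", blocking store files = "
            + conf.getInt("hbase.hstore.blockingStoreFiles", 16));
    }
}

With exactly three eligible files, the ExploringCompactionPolicy selects all of them, which is why each store here initiates a "minor compaction (all files)".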
2024-12-10T15:37:12,618 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting a7edfdc3a50e49d8b75cf1853ff2e471, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=458, earliestPutTs=1733845030619 2024-12-10T15:37:12,618 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/fc2ce3ab21314035976860c9e0118650, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/803df6a1bc4347abb98b2b77e817ee15, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/aeff1e7df2e840bd9d885284cf650680] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=37.1 K 2024-12-10T15:37:12,618 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b886799c61f47fb9605ffa5d62ae9f7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=484, earliestPutTs=1733845030643 2024-12-10T15:37:12,618 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting fc2ce3ab21314035976860c9e0118650, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=458, earliestPutTs=1733845030619 2024-12-10T15:37:12,619 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 803df6a1bc4347abb98b2b77e817ee15, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=484, earliestPutTs=1733845030643 2024-12-10T15:37:12,619 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting a0dd8f9edb734ac9ae9eb7753b8ccbbc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=499, earliestPutTs=1733845031012 2024-12-10T15:37:12,619 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting aeff1e7df2e840bd9d885284cf650680, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=499, earliestPutTs=1733845031012 2024-12-10T15:37:12,633 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#B#compaction#296 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:12,634 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/6bcbc5b2af6d4de9b3e29fd5dd35ac6d is 50, key is test_row_0/B:col10/1733845031635/Put/seqid=0 2024-12-10T15:37:12,637 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#A#compaction#297 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:12,637 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/0e9eebb8e9d7477f89c3d691e3d2e98d is 50, key is test_row_0/A:col10/1733845031635/Put/seqid=0 2024-12-10T15:37:12,658 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:12,658 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-10T15:37:12,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:12,659 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-10T15:37:12,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A 2024-12-10T15:37:12,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:12,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B 2024-12-10T15:37:12,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:12,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=C 2024-12-10T15:37:12,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:12,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742171_1347 (size=13493) 2024-12-10T15:37:12,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742172_1348 (size=13527) 2024-12-10T15:37:12,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/219296f23ee341da9135b760f1e25bc6 is 50, key is test_row_0/A:col10/1733845031681/Put/seqid=0 2024-12-10T15:37:12,671 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/6bcbc5b2af6d4de9b3e29fd5dd35ac6d as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/6bcbc5b2af6d4de9b3e29fd5dd35ac6d 2024-12-10T15:37:12,674 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/0e9eebb8e9d7477f89c3d691e3d2e98d as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/0e9eebb8e9d7477f89c3d691e3d2e98d 2024-12-10T15:37:12,678 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/B of 614727b67ed1c48d9acfd143d8b127a7 into 6bcbc5b2af6d4de9b3e29fd5dd35ac6d(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:37:12,678 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:12,678 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/B, priority=13, startTime=1733845032617; duration=0sec 2024-12-10T15:37:12,678 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:12,678 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:B 2024-12-10T15:37:12,678 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:37:12,681 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38027 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:37:12,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742173_1349 (size=12301) 2024-12-10T15:37:12,681 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/C is initiating minor compaction (all files) 2024-12-10T15:37:12,682 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/C in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
2024-12-10T15:37:12,682 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/7593da9d52c84c269e62f6eaab2a31e6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/11da7521b8f04ed6a54c524ba8f5537d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/b49aeacb823a4c45b97fc134f63b7d65] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=37.1 K 2024-12-10T15:37:12,682 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/A of 614727b67ed1c48d9acfd143d8b127a7 into 0e9eebb8e9d7477f89c3d691e3d2e98d(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:37:12,682 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:12,682 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/A, priority=13, startTime=1733845032616; duration=0sec 2024-12-10T15:37:12,682 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:12,682 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:A 2024-12-10T15:37:12,682 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 7593da9d52c84c269e62f6eaab2a31e6, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=458, earliestPutTs=1733845030619 2024-12-10T15:37:12,688 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 11da7521b8f04ed6a54c524ba8f5537d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=484, earliestPutTs=1733845030643 2024-12-10T15:37:12,688 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=523 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/219296f23ee341da9135b760f1e25bc6 2024-12-10T15:37:12,688 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting b49aeacb823a4c45b97fc134f63b7d65, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=499, earliestPutTs=1733845031012 2024-12-10T15:37:12,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/f7c19e80c44d4f50aecad66ae059f43d is 50, key is test_row_0/B:col10/1733845031681/Put/seqid=0 2024-12-10T15:37:12,697 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#C#compaction#300 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:12,698 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/316a60ddabb64f26a664ef65437c66d6 is 50, key is test_row_0/C:col10/1733845031635/Put/seqid=0 2024-12-10T15:37:12,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742174_1350 (size=12301) 2024-12-10T15:37:12,734 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=523 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/f7c19e80c44d4f50aecad66ae059f43d 2024-12-10T15:37:12,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742175_1351 (size=13527) 2024-12-10T15:37:12,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/c1f2147fb0844ffabb52c77212790d13 is 50, key is test_row_0/C:col10/1733845031681/Put/seqid=0 2024-12-10T15:37:12,761 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/316a60ddabb64f26a664ef65437c66d6 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/316a60ddabb64f26a664ef65437c66d6 2024-12-10T15:37:12,767 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/C of 614727b67ed1c48d9acfd143d8b127a7 into 316a60ddabb64f26a664ef65437c66d6(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
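The compactions in this stretch are queued automatically by the flusher (the "Add compact mark" / "Small Compaction requested" entries), but the same work can also be requested explicitly. A minimal sketch using the Admin API, under the same illustrative connection assumptions as the earlier flush sketch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionRequestSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            // Queue a minor compaction for every store of the table; the region server's
            // compaction policy still decides which files to merge, exactly as the
            // ExploringCompactionPolicy entries in this log show.
            admin.compact(table);
            // A major compaction would rewrite each store down to a single file:
            // admin.majorCompact(table);
        }
    }
}

The "average throughput is 6.55 MB/second ... total limit is 50.00 MB/second" lines come from the pressure-aware throughput controller, which throttles compaction writes; at this small data volume it never needs to sleep ("slept 0 time(s)").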
2024-12-10T15:37:12,767 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:12,767 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/C, priority=13, startTime=1733845032617; duration=0sec 2024-12-10T15:37:12,767 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:12,767 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:C 2024-12-10T15:37:12,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742176_1352 (size=12301) 2024-12-10T15:37:12,774 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=523 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/c1f2147fb0844ffabb52c77212790d13 2024-12-10T15:37:12,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/219296f23ee341da9135b760f1e25bc6 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/219296f23ee341da9135b760f1e25bc6 2024-12-10T15:37:12,783 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/219296f23ee341da9135b760f1e25bc6, entries=150, sequenceid=523, filesize=12.0 K 2024-12-10T15:37:12,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/f7c19e80c44d4f50aecad66ae059f43d as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/f7c19e80c44d4f50aecad66ae059f43d 2024-12-10T15:37:12,790 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/f7c19e80c44d4f50aecad66ae059f43d, entries=150, sequenceid=523, filesize=12.0 K 2024-12-10T15:37:12,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/c1f2147fb0844ffabb52c77212790d13 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/c1f2147fb0844ffabb52c77212790d13 2024-12-10T15:37:12,796 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/c1f2147fb0844ffabb52c77212790d13, entries=150, sequenceid=523, filesize=12.0 K 2024-12-10T15:37:12,797 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for 614727b67ed1c48d9acfd143d8b127a7 in 138ms, sequenceid=523, compaction requested=false 2024-12-10T15:37:12,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:12,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:12,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-12-10T15:37:12,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-12-10T15:37:12,801 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-12-10T15:37:12,801 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0680 sec 2024-12-10T15:37:12,803 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 1.0740 sec 2024-12-10T15:37:12,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:37:12,815 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T15:37:12,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A 2024-12-10T15:37:12,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:12,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B 2024-12-10T15:37:12,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:12,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, 
store=C 2024-12-10T15:37:12,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:12,823 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/87ce91966b73469fb3f52f00b4501832 is 50, key is test_row_0/A:col10/1733845032815/Put/seqid=0 2024-12-10T15:37:12,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-10T15:37:12,832 INFO [Thread-1117 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-12-10T15:37:12,834 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:37:12,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-12-10T15:37:12,835 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:37:12,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-10T15:37:12,835 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:37:12,835 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:37:12,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742177_1353 (size=9857) 2024-12-10T15:37:12,840 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=537 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/87ce91966b73469fb3f52f00b4501832 2024-12-10T15:37:12,847 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/1d23e76b5d85421c92f4dbf0aaa6c27c is 50, key is test_row_0/B:col10/1733845032815/Put/seqid=0 2024-12-10T15:37:12,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742178_1354 (size=9857) 2024-12-10T15:37:12,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:12,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845092869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:12,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:12,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845092869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:12,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:12,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845092869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:12,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-10T15:37:12,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:12,973 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:12,973 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:12,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845092972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:12,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845092972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:12,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845092972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:12,987 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:12,988 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-10T15:37:12,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:12,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:12,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:12,988 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:12,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:12,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:13,140 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:13,140 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-10T15:37:13,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:13,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:13,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:13,141 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:13,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:13,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:13,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-10T15:37:13,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:13,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845093164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:13,175 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:13,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845093174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:13,176 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:13,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845093174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:13,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:13,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845093175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:13,263 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=537 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/1d23e76b5d85421c92f4dbf0aaa6c27c 2024-12-10T15:37:13,289 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/541870b332e54b1782ceccf0c3e30bea is 50, key is test_row_0/C:col10/1733845032815/Put/seqid=0 2024-12-10T15:37:13,293 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:13,293 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-10T15:37:13,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:13,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. as already flushing 2024-12-10T15:37:13,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:13,293 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:13,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:13,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:13,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742179_1355 (size=9857) 2024-12-10T15:37:13,328 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=537 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/541870b332e54b1782ceccf0c3e30bea 2024-12-10T15:37:13,343 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/87ce91966b73469fb3f52f00b4501832 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/87ce91966b73469fb3f52f00b4501832 2024-12-10T15:37:13,353 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/87ce91966b73469fb3f52f00b4501832, entries=100, sequenceid=537, filesize=9.6 K 2024-12-10T15:37:13,360 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/1d23e76b5d85421c92f4dbf0aaa6c27c as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/1d23e76b5d85421c92f4dbf0aaa6c27c 2024-12-10T15:37:13,368 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/1d23e76b5d85421c92f4dbf0aaa6c27c, entries=100, sequenceid=537, filesize=9.6 K 2024-12-10T15:37:13,370 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/541870b332e54b1782ceccf0c3e30bea as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/541870b332e54b1782ceccf0c3e30bea 2024-12-10T15:37:13,379 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/541870b332e54b1782ceccf0c3e30bea, entries=100, sequenceid=537, filesize=9.6 K 2024-12-10T15:37:13,380 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 614727b67ed1c48d9acfd143d8b127a7 in 565ms, sequenceid=537, compaction requested=true 2024-12-10T15:37:13,380 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:13,380 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:37:13,380 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:37:13,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:13,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:37:13,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:13,381 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:37:13,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:37:13,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:13,381 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35685 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:37:13,382 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/A is initiating minor 
compaction (all files) 2024-12-10T15:37:13,382 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/A in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:13,382 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/0e9eebb8e9d7477f89c3d691e3d2e98d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/219296f23ee341da9135b760f1e25bc6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/87ce91966b73469fb3f52f00b4501832] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=34.8 K 2024-12-10T15:37:13,382 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0e9eebb8e9d7477f89c3d691e3d2e98d, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=499, earliestPutTs=1733845031012 2024-12-10T15:37:13,382 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35651 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:37:13,382 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/B is initiating minor compaction (all files) 2024-12-10T15:37:13,382 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 219296f23ee341da9135b760f1e25bc6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=523, earliestPutTs=1733845031677 2024-12-10T15:37:13,382 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/B in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
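Aside: the flush-then-compact cycle recorded above can also be driven explicitly from a client or test harness through the HBase Admin API. The following is a minimal sketch, not part of this test: the table name is taken from the log, while the connection setup (an hbase-site.xml pointing at this cluster) and error handling are assumptions.

// Sketch: explicitly flushing and compacting the test table via the Admin API.
// Assumes cluster configuration is available on the classpath.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.flush(table);        // ask region servers to flush memstores to HFiles
      admin.majorCompact(table); // then request compaction of the flushed store files
    }
  }
}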
2024-12-10T15:37:13,382 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/6bcbc5b2af6d4de9b3e29fd5dd35ac6d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/f7c19e80c44d4f50aecad66ae059f43d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/1d23e76b5d85421c92f4dbf0aaa6c27c] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=34.8 K 2024-12-10T15:37:13,383 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 87ce91966b73469fb3f52f00b4501832, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=537, earliestPutTs=1733845032809 2024-12-10T15:37:13,383 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 6bcbc5b2af6d4de9b3e29fd5dd35ac6d, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=499, earliestPutTs=1733845031012 2024-12-10T15:37:13,384 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting f7c19e80c44d4f50aecad66ae059f43d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=523, earliestPutTs=1733845031677 2024-12-10T15:37:13,384 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d23e76b5d85421c92f4dbf0aaa6c27c, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=537, earliestPutTs=1733845032809 2024-12-10T15:37:13,399 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#B#compaction#306 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:13,399 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#A#compaction#305 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:13,400 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/102cc7303b2a4c298bbfa0c9a54a90a2 is 50, key is test_row_0/A:col10/1733845032815/Put/seqid=0 2024-12-10T15:37:13,400 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/c6b09813d0e942059a4a3728f125db80 is 50, key is test_row_0/B:col10/1733845032815/Put/seqid=0 2024-12-10T15:37:13,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742180_1356 (size=13595) 2024-12-10T15:37:13,415 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/c6b09813d0e942059a4a3728f125db80 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/c6b09813d0e942059a4a3728f125db80 2024-12-10T15:37:13,421 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/B of 614727b67ed1c48d9acfd143d8b127a7 into c6b09813d0e942059a4a3728f125db80(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
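Aside: the repeated "Over memstore limit=512.0 K" rejections interleaved with this flush and compaction activity come from HRegion.checkResources(), which refuses writes once a region's memstore exceeds its blocking threshold (the per-region flush size multiplied by the block multiplier). A minimal sketch of the relevant configuration follows; the values are illustrative ones that would yield a 512 K blocking limit, since the actual settings of this test run are not visible in the log excerpt.

// Sketch: configuration knobs behind the "Over memstore limit" rejections.
// 128 KB flush size x 4 multiplier = 512 KB blocking limit (illustrative values only).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // flush threshold per region
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // writes block at flush.size * multiplier
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Writes would be rejected above ~" + blockingLimit + " bytes per region");
  }
}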
2024-12-10T15:37:13,421 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:13,421 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/B, priority=13, startTime=1733845033381; duration=0sec 2024-12-10T15:37:13,421 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:13,421 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:B 2024-12-10T15:37:13,421 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:37:13,422 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35685 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:37:13,422 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/C is initiating minor compaction (all files) 2024-12-10T15:37:13,422 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/C in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:13,422 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/316a60ddabb64f26a664ef65437c66d6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/c1f2147fb0844ffabb52c77212790d13, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/541870b332e54b1782ceccf0c3e30bea] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=34.8 K 2024-12-10T15:37:13,423 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 316a60ddabb64f26a664ef65437c66d6, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=499, earliestPutTs=1733845031012 2024-12-10T15:37:13,423 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting c1f2147fb0844ffabb52c77212790d13, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=523, earliestPutTs=1733845031677 2024-12-10T15:37:13,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742181_1357 (size=13629) 2024-12-10T15:37:13,428 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 541870b332e54b1782ceccf0c3e30bea, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, 
compression=NONE, seqNum=537, earliestPutTs=1733845032809 2024-12-10T15:37:13,435 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/102cc7303b2a4c298bbfa0c9a54a90a2 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/102cc7303b2a4c298bbfa0c9a54a90a2 2024-12-10T15:37:13,437 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#C#compaction#307 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:13,438 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/9e4d40262eb54c109e223c09bd5ee8be is 50, key is test_row_0/C:col10/1733845032815/Put/seqid=0 2024-12-10T15:37:13,440 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/A of 614727b67ed1c48d9acfd143d8b127a7 into 102cc7303b2a4c298bbfa0c9a54a90a2(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:37:13,440 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:13,440 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/A, priority=13, startTime=1733845033380; duration=0sec 2024-12-10T15:37:13,440 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:13,440 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:A 2024-12-10T15:37:13,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-10T15:37:13,446 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:13,446 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-10T15:37:13,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
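Aside: the client-facing effect of the blocked region is the stream of RegionTooBusyException responses to the Mutate calls above. The stock HBase client already retries such failures (subject to hbase.client.retries.number and the operation timeout), but a caller can also back off explicitly. The sketch below is hypothetical and only illustrative of that pattern; the row, family, and qualifier names mirror the test's conventions and are not taken from its actual code.

// Sketch: explicit backoff around a put that may hit RegionTooBusyException,
// as seen for the rejected Mutate calls in this log. Names are illustrative.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (RegionTooBusyException busy) {
          // Region is over its memstore blocking limit; wait for the flush to catch up.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}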
2024-12-10T15:37:13,446 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-10T15:37:13,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A 2024-12-10T15:37:13,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:13,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B 2024-12-10T15:37:13,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:13,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=C 2024-12-10T15:37:13,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:13,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742182_1358 (size=13629) 2024-12-10T15:37:13,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/421c93f006094425ba1d9f73ec4b5663 is 50, key is test_row_0/A:col10/1733845032837/Put/seqid=0 2024-12-10T15:37:13,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742183_1359 (size=12301) 2024-12-10T15:37:13,472 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=565 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/421c93f006094425ba1d9f73ec4b5663 2024-12-10T15:37:13,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:37:13,479 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
as already flushing 2024-12-10T15:37:13,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/166029f73de84cd3bedc322156015025 is 50, key is test_row_0/B:col10/1733845032837/Put/seqid=0 2024-12-10T15:37:13,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:13,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845093490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:13,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742184_1360 (size=12301) 2024-12-10T15:37:13,493 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:13,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 272 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845093490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:13,493 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:13,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845093490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:13,494 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=565 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/166029f73de84cd3bedc322156015025 2024-12-10T15:37:13,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/66229a2652ad45ddab3e70ab087e8571 is 50, key is test_row_0/C:col10/1733845032837/Put/seqid=0 2024-12-10T15:37:13,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742185_1361 (size=12301) 2024-12-10T15:37:13,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:13,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845093594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:13,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:13,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 274 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845093594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:13,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:13,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845093594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:13,795 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:13,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845093795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:13,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:13,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 276 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845093796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:13,799 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:13,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845093797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:13,855 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/9e4d40262eb54c109e223c09bd5ee8be as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/9e4d40262eb54c109e223c09bd5ee8be 2024-12-10T15:37:13,860 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/C of 614727b67ed1c48d9acfd143d8b127a7 into 9e4d40262eb54c109e223c09bd5ee8be(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
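Editor's note: the repeated RegionTooBusyException entries above are the region server rejecting writes while the region's memstore is over its blocking limit; callers are expected to back off and retry. Below is a minimal, hypothetical client-side sketch (plain Java, not part of this test): the table name and column layout are taken from the log, but the retry count and sleep are illustrative assumptions, and in practice the HBase client's own retry settings usually absorb these rejections, possibly surfacing them wrapped in a retries-exhausted error rather than directly.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            int attempts = 0;
            while (true) {
                try {
                    table.put(put); // may be rejected while the region is over its memstore limit
                    break;
                } catch (IOException e) {
                    // The server-side RegionTooBusyException may arrive directly or as a cause,
                    // depending on client retry settings; treat either as "back off and retry".
                    boolean busy = e instanceof RegionTooBusyException
                            || e.getCause() instanceof RegionTooBusyException;
                    if (!busy || ++attempts >= 5) throw e; // illustrative retry limit
                    Thread.sleep(100L * attempts);         // simple linear backoff (illustrative)
                }
            }
        }
    }
}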
2024-12-10T15:37:13,860 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:13,860 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/C, priority=13, startTime=1733845033381; duration=0sec 2024-12-10T15:37:13,860 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:13,860 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:C 2024-12-10T15:37:13,938 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=565 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/66229a2652ad45ddab3e70ab087e8571 2024-12-10T15:37:13,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/421c93f006094425ba1d9f73ec4b5663 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/421c93f006094425ba1d9f73ec4b5663 2024-12-10T15:37:13,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-10T15:37:13,946 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/421c93f006094425ba1d9f73ec4b5663, entries=150, sequenceid=565, filesize=12.0 K 2024-12-10T15:37:13,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/166029f73de84cd3bedc322156015025 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/166029f73de84cd3bedc322156015025 2024-12-10T15:37:13,950 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/166029f73de84cd3bedc322156015025, entries=150, sequenceid=565, filesize=12.0 K 2024-12-10T15:37:13,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/66229a2652ad45ddab3e70ab087e8571 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/66229a2652ad45ddab3e70ab087e8571 2024-12-10T15:37:13,954 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/66229a2652ad45ddab3e70ab087e8571, entries=150, sequenceid=565, filesize=12.0 K 2024-12-10T15:37:13,955 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=60.38 KB/61830 for 614727b67ed1c48d9acfd143d8b127a7 in 509ms, sequenceid=565, compaction requested=false 2024-12-10T15:37:13,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:13,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:13,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-12-10T15:37:13,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-12-10T15:37:13,957 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-12-10T15:37:13,958 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1210 sec 2024-12-10T15:37:13,959 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 1.1240 sec 2024-12-10T15:37:14,098 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-10T15:37:14,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A 2024-12-10T15:37:14,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:14,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B 2024-12-10T15:37:14,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:14,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=C 2024-12-10T15:37:14,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
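Editor's note: the pid=84/pid=83 lines above are the per-region flush subprocedure and the master's FlushTableProcedure completing. A flush like this can also be requested from a client through the Admin API; the sketch below is illustrative only (it assumes the same table name and a default client configuration, and is not how this particular test issued the flush).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush every region of the table; on the region server this
            // appears as RS_FLUSH_REGIONS events and "Finished flush of dataSize ..." lines.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}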
2024-12-10T15:37:14,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:37:14,104 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/b5dd0f37f2c8458db0533fb98b8d0174 is 50, key is test_row_0/A:col10/1733845033485/Put/seqid=0 2024-12-10T15:37:14,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:14,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 284 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845094120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:14,122 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:14,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845094120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:14,124 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:14,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845094121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:14,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742186_1362 (size=14741) 2024-12-10T15:37:14,135 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=579 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/b5dd0f37f2c8458db0533fb98b8d0174 2024-12-10T15:37:14,142 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/37e9011a5e4443e2ac7a934d85e5db53 is 50, key is test_row_0/B:col10/1733845033485/Put/seqid=0 2024-12-10T15:37:14,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742187_1363 (size=12301) 2024-12-10T15:37:14,148 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=579 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/37e9011a5e4443e2ac7a934d85e5db53 2024-12-10T15:37:14,157 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/69d0c821583c455d866d6d0e34134afb is 50, key is test_row_0/C:col10/1733845033485/Put/seqid=0 2024-12-10T15:37:14,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742188_1364 (size=12301) 2024-12-10T15:37:14,175 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=579 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/69d0c821583c455d866d6d0e34134afb 2024-12-10T15:37:14,180 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/b5dd0f37f2c8458db0533fb98b8d0174 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/b5dd0f37f2c8458db0533fb98b8d0174 2024-12-10T15:37:14,186 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/b5dd0f37f2c8458db0533fb98b8d0174, entries=200, sequenceid=579, filesize=14.4 K 2024-12-10T15:37:14,188 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/37e9011a5e4443e2ac7a934d85e5db53 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/37e9011a5e4443e2ac7a934d85e5db53 2024-12-10T15:37:14,195 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/37e9011a5e4443e2ac7a934d85e5db53, entries=150, sequenceid=579, filesize=12.0 K 2024-12-10T15:37:14,197 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/69d0c821583c455d866d6d0e34134afb as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/69d0c821583c455d866d6d0e34134afb 2024-12-10T15:37:14,203 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/69d0c821583c455d866d6d0e34134afb, entries=150, sequenceid=579, filesize=12.0 K 2024-12-10T15:37:14,205 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 614727b67ed1c48d9acfd143d8b127a7 in 106ms, sequenceid=579, compaction requested=true 2024-12-10T15:37:14,205 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:14,205 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:37:14,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:37:14,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:14,205 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 
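Editor's note: the "Over memstore limit=512.0 K" warnings throughout this stretch come from HRegion.checkResources, which blocks writes once a region's memstore exceeds its blocking size, i.e. the per-region flush size multiplied by the block multiplier (the small 512 K figure here reflects this test's deliberately tiny flush size). The sketch below only illustrates the two configuration knobs involved; the values shown are placeholders (the defaults), not the ones this test run actually uses.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingConfigExample {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region memstore flush threshold (placeholder: the 128 MB default).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        // Writes are blocked once the memstore reaches flushSize * blockMultiplier,
        // which is the "Over memstore limit=..." value in the warnings above.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking memstore size = " + (flushSize * multiplier) + " bytes");
    }
}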
2024-12-10T15:37:14,206 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38197 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:37:14,206 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/B is initiating minor compaction (all files) 2024-12-10T15:37:14,206 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/B in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:14,206 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/c6b09813d0e942059a4a3728f125db80, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/166029f73de84cd3bedc322156015025, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/37e9011a5e4443e2ac7a934d85e5db53] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=37.3 K 2024-12-10T15:37:14,206 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40671 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:37:14,206 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/A is initiating minor compaction (all files) 2024-12-10T15:37:14,206 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/A in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
2024-12-10T15:37:14,207 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/102cc7303b2a4c298bbfa0c9a54a90a2, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/421c93f006094425ba1d9f73ec4b5663, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/b5dd0f37f2c8458db0533fb98b8d0174] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=39.7 K 2024-12-10T15:37:14,207 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 102cc7303b2a4c298bbfa0c9a54a90a2, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=537, earliestPutTs=1733845031679 2024-12-10T15:37:14,207 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting c6b09813d0e942059a4a3728f125db80, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=537, earliestPutTs=1733845031679 2024-12-10T15:37:14,207 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 421c93f006094425ba1d9f73ec4b5663, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=565, earliestPutTs=1733845032836 2024-12-10T15:37:14,207 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 166029f73de84cd3bedc322156015025, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=565, earliestPutTs=1733845032836 2024-12-10T15:37:14,208 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting b5dd0f37f2c8458db0533fb98b8d0174, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=579, earliestPutTs=1733845033485 2024-12-10T15:37:14,208 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 37e9011a5e4443e2ac7a934d85e5db53, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=579, earliestPutTs=1733845033485 2024-12-10T15:37:14,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:37:14,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:14,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:37:14,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:14,222 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#A#compaction#314 average throughput is 6.55 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:14,223 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/1fd986ea5342420d82c4f87ffef40e23 is 50, key is test_row_0/A:col10/1733845033485/Put/seqid=0 2024-12-10T15:37:14,227 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-10T15:37:14,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A 2024-12-10T15:37:14,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:14,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B 2024-12-10T15:37:14,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:14,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=C 2024-12-10T15:37:14,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:14,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:37:14,229 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#B#compaction#315 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:14,230 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/91fdd4f17d2e4bc38d57e55e2faecb47 is 50, key is test_row_0/B:col10/1733845033485/Put/seqid=0 2024-12-10T15:37:14,238 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/f27a3a94354d4ae0ab44a83cb8d2048a is 50, key is test_row_0/A:col10/1733845034120/Put/seqid=0 2024-12-10T15:37:14,251 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:14,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 290 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845094246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:14,251 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:14,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845094246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:14,253 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:14,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 269 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845094251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:14,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742189_1365 (size=13731) 2024-12-10T15:37:14,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742191_1367 (size=12301) 2024-12-10T15:37:14,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742190_1366 (size=13697) 2024-12-10T15:37:14,354 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:14,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 292 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845094352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:14,355 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:14,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 269 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845094353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:14,363 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:14,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 271 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845094360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:14,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:14,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:14,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 294 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845094556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:14,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 271 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845094556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:14,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:14,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 273 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845094566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:14,636 DEBUG [Thread-1124 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1730a60f to 127.0.0.1:56346 2024-12-10T15:37:14,636 DEBUG [Thread-1124 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:37:14,637 DEBUG [Thread-1118 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6a0e9c8f to 127.0.0.1:56346 2024-12-10T15:37:14,637 DEBUG [Thread-1118 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:37:14,637 DEBUG [Thread-1122 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x10e6bf6a to 127.0.0.1:56346 2024-12-10T15:37:14,637 DEBUG [Thread-1122 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:37:14,638 DEBUG [Thread-1126 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x598cfed4 to 127.0.0.1:56346 2024-12-10T15:37:14,638 DEBUG [Thread-1120 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d68f787 to 127.0.0.1:56346 2024-12-10T15:37:14,638 DEBUG [Thread-1120 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:37:14,638 DEBUG [Thread-1126 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:37:14,709 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/1fd986ea5342420d82c4f87ffef40e23 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/1fd986ea5342420d82c4f87ffef40e23 2024-12-10T15:37:14,713 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/A of 614727b67ed1c48d9acfd143d8b127a7 into 1fd986ea5342420d82c4f87ffef40e23(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
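Note on the repeated RegionTooBusyException entries above: HRegion.checkResources rejects writes once the region's memstore passes its blocking threshold, which is generally the per-region flush size multiplied by the block multiplier. A minimal sketch of that derivation follows; the concrete values are assumptions chosen only so the product matches the "Over memstore limit=512.0 K" figure in this log, not settings read from the test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch: writes to a region are blocked (RegionTooBusyException) once its memstore
// exceeds flush.size * block.multiplier. The 128 KB flush size below is an assumed
// test-style value; 128 KB * 4 = 512 K, matching the limit reported above.
public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // assumed value
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // assumed value

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Writes block above ~" + (blockingLimit / 1024) + " K per region");
  }
}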
2024-12-10T15:37:14,713 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:14,713 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/A, priority=13, startTime=1733845034205; duration=0sec 2024-12-10T15:37:14,713 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:14,713 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:A 2024-12-10T15:37:14,713 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:37:14,714 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38231 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:37:14,714 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/C is initiating minor compaction (all files) 2024-12-10T15:37:14,714 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/C in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:14,714 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/9e4d40262eb54c109e223c09bd5ee8be, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/66229a2652ad45ddab3e70ab087e8571, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/69d0c821583c455d866d6d0e34134afb] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=37.3 K 2024-12-10T15:37:14,714 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e4d40262eb54c109e223c09bd5ee8be, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=537, earliestPutTs=1733845031679 2024-12-10T15:37:14,715 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 66229a2652ad45ddab3e70ab087e8571, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=565, earliestPutTs=1733845032836 2024-12-10T15:37:14,715 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 69d0c821583c455d866d6d0e34134afb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=579, earliestPutTs=1733845033485 2024-12-10T15:37:14,719 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#C#compaction#317 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:14,720 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/4bd4306df89144dc954f55d6e0983da3 is 50, key is test_row_0/C:col10/1733845033485/Put/seqid=0 2024-12-10T15:37:14,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742192_1368 (size=13731) 2024-12-10T15:37:14,734 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=603 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/f27a3a94354d4ae0ab44a83cb8d2048a 2024-12-10T15:37:14,737 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/91fdd4f17d2e4bc38d57e55e2faecb47 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/91fdd4f17d2e4bc38d57e55e2faecb47 2024-12-10T15:37:14,739 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/33cbd2e196a541d99934fa0f93b9ebee is 50, key is test_row_0/B:col10/1733845034120/Put/seqid=0 2024-12-10T15:37:14,743 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/B of 614727b67ed1c48d9acfd143d8b127a7 into 91fdd4f17d2e4bc38d57e55e2faecb47(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:37:14,743 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:14,743 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/B, priority=13, startTime=1733845034205; duration=0sec 2024-12-10T15:37:14,743 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:14,743 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:B 2024-12-10T15:37:14,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742193_1369 (size=12301) 2024-12-10T15:37:14,750 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=603 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/33cbd2e196a541d99934fa0f93b9ebee 2024-12-10T15:37:14,762 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/4cee7fef733e4ca9b89365b161013b7a is 50, key is test_row_0/C:col10/1733845034120/Put/seqid=0 2024-12-10T15:37:14,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742194_1370 (size=12301) 2024-12-10T15:37:14,860 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:14,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 273 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58250 deadline: 1733845094859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:14,861 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:14,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 296 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58264 deadline: 1733845094861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:14,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:14,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 275 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58280 deadline: 1733845094870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:14,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-10T15:37:14,945 INFO [Thread-1117 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-12-10T15:37:15,127 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/4bd4306df89144dc954f55d6e0983da3 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/4bd4306df89144dc954f55d6e0983da3 2024-12-10T15:37:15,130 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/C of 614727b67ed1c48d9acfd143d8b127a7 into 4bd4306df89144dc954f55d6e0983da3(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:37:15,130 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:15,130 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/C, priority=13, startTime=1733845034212; duration=0sec 2024-12-10T15:37:15,130 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:15,130 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:C 2024-12-10T15:37:15,166 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=603 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/4cee7fef733e4ca9b89365b161013b7a 2024-12-10T15:37:15,169 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/f27a3a94354d4ae0ab44a83cb8d2048a as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/f27a3a94354d4ae0ab44a83cb8d2048a 2024-12-10T15:37:15,172 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/f27a3a94354d4ae0ab44a83cb8d2048a, entries=150, sequenceid=603, filesize=12.0 K 2024-12-10T15:37:15,173 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:15,173 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/33cbd2e196a541d99934fa0f93b9ebee as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/33cbd2e196a541d99934fa0f93b9ebee 2024-12-10T15:37:15,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1733845095172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:15,173 DEBUG [Thread-1115 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4162 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., hostname=bf0fec90ff6d,46239,1733844953049, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T15:37:15,175 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/33cbd2e196a541d99934fa0f93b9ebee, entries=150, sequenceid=603, filesize=12.0 K 2024-12-10T15:37:15,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/4cee7fef733e4ca9b89365b161013b7a as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/4cee7fef733e4ca9b89365b161013b7a 2024-12-10T15:37:15,178 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/4cee7fef733e4ca9b89365b161013b7a, entries=150, sequenceid=603, filesize=12.0 K 2024-12-10T15:37:15,179 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 614727b67ed1c48d9acfd143d8b127a7 in 953ms, sequenceid=603, compaction requested=false 2024-12-10T15:37:15,179 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:15,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:37:15,362 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-10T15:37:15,362 DEBUG [Thread-1113 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x560ec309 to 127.0.0.1:56346 2024-12-10T15:37:15,362 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A 2024-12-10T15:37:15,362 DEBUG [Thread-1113 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:37:15,363 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:15,363 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B 2024-12-10T15:37:15,363 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:15,363 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=C 2024-12-10T15:37:15,363 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:15,364 DEBUG [Thread-1109 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x08d0caa5 to 127.0.0.1:56346 2024-12-10T15:37:15,364 DEBUG [Thread-1109 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:37:15,366 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/19c102dd4bfe4b938e6d6114fda4d8ee is 50, key is test_row_0/A:col10/1733845035361/Put/seqid=0 2024-12-10T15:37:15,369 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742195_1371 (size=12301) 2024-12-10T15:37:15,372 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=619 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/19c102dd4bfe4b938e6d6114fda4d8ee 2024-12-10T15:37:15,374 DEBUG [Thread-1111 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x43f04e0e to 127.0.0.1:56346 2024-12-10T15:37:15,375 DEBUG [Thread-1111 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:37:15,379 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/ac4d6a984db04bbea42153f887bc1bbb is 50, key is test_row_0/B:col10/1733845035361/Put/seqid=0 2024-12-10T15:37:15,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742196_1372 (size=12301) 2024-12-10T15:37:15,782 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=619 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/ac4d6a984db04bbea42153f887bc1bbb 2024-12-10T15:37:15,787 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/c5d17a8e5c74450da53f60e24b5819b2 is 50, key is test_row_0/C:col10/1733845035361/Put/seqid=0 2024-12-10T15:37:15,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742197_1373 (size=12301) 2024-12-10T15:37:16,191 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=619 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/c5d17a8e5c74450da53f60e24b5819b2 2024-12-10T15:37:16,194 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/19c102dd4bfe4b938e6d6114fda4d8ee as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/19c102dd4bfe4b938e6d6114fda4d8ee 2024-12-10T15:37:16,196 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/19c102dd4bfe4b938e6d6114fda4d8ee, entries=150, sequenceid=619, filesize=12.0 K 2024-12-10T15:37:16,197 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/ac4d6a984db04bbea42153f887bc1bbb as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/ac4d6a984db04bbea42153f887bc1bbb 2024-12-10T15:37:16,199 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/ac4d6a984db04bbea42153f887bc1bbb, entries=150, sequenceid=619, filesize=12.0 K 2024-12-10T15:37:16,200 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/c5d17a8e5c74450da53f60e24b5819b2 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/c5d17a8e5c74450da53f60e24b5819b2 2024-12-10T15:37:16,203 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/c5d17a8e5c74450da53f60e24b5819b2, entries=150, sequenceid=619, filesize=12.0 K 2024-12-10T15:37:16,203 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=13.42 KB/13740 for 614727b67ed1c48d9acfd143d8b127a7 in 841ms, sequenceid=619, compaction requested=true 2024-12-10T15:37:16,203 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:16,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:37:16,203 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:37:16,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:16,204 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:37:16,204 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:16,204 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 614727b67ed1c48d9acfd143d8b127a7:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:37:16,204 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:37:16,204 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:16,204 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:37:16,204 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38299 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:37:16,204 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/A is initiating minor compaction (all files) 2024-12-10T15:37:16,204 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/B is initiating minor compaction (all files) 2024-12-10T15:37:16,204 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/A in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:16,204 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/B in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:16,204 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/1fd986ea5342420d82c4f87ffef40e23, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/f27a3a94354d4ae0ab44a83cb8d2048a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/19c102dd4bfe4b938e6d6114fda4d8ee] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=37.4 K 2024-12-10T15:37:16,204 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/91fdd4f17d2e4bc38d57e55e2faecb47, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/33cbd2e196a541d99934fa0f93b9ebee, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/ac4d6a984db04bbea42153f887bc1bbb] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=37.4 K 2024-12-10T15:37:16,204 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1fd986ea5342420d82c4f87ffef40e23, keycount=150, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=579, earliestPutTs=1733845033485 2024-12-10T15:37:16,204 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 
91fdd4f17d2e4bc38d57e55e2faecb47, keycount=150, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=579, earliestPutTs=1733845033485 2024-12-10T15:37:16,205 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting f27a3a94354d4ae0ab44a83cb8d2048a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=603, earliestPutTs=1733845034117 2024-12-10T15:37:16,205 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 33cbd2e196a541d99934fa0f93b9ebee, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=603, earliestPutTs=1733845034117 2024-12-10T15:37:16,205 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 19c102dd4bfe4b938e6d6114fda4d8ee, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=619, earliestPutTs=1733845034242 2024-12-10T15:37:16,205 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting ac4d6a984db04bbea42153f887bc1bbb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=619, earliestPutTs=1733845034242 2024-12-10T15:37:16,216 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#A#compaction#323 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:16,216 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#B#compaction#324 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:16,216 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/709e1f1c199d4740b40af7210e378819 is 50, key is test_row_0/B:col10/1733845035361/Put/seqid=0 2024-12-10T15:37:16,216 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/6d4b9cbd43b8475b9a2c78f7b680ecec is 50, key is test_row_0/A:col10/1733845035361/Put/seqid=0 2024-12-10T15:37:16,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742199_1375 (size=13833) 2024-12-10T15:37:16,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742198_1374 (size=13799) 2024-12-10T15:37:16,624 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/6d4b9cbd43b8475b9a2c78f7b680ecec as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/6d4b9cbd43b8475b9a2c78f7b680ecec 2024-12-10T15:37:16,624 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/709e1f1c199d4740b40af7210e378819 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/709e1f1c199d4740b40af7210e378819 2024-12-10T15:37:16,628 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/B of 614727b67ed1c48d9acfd143d8b127a7 into 709e1f1c199d4740b40af7210e378819(size=13.5 K), total size for store is 13.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:37:16,628 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/A of 614727b67ed1c48d9acfd143d8b127a7 into 6d4b9cbd43b8475b9a2c78f7b680ecec(size=13.5 K), total size for store is 13.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
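The "Call exception, tries=6, retries=16, started=4162 ms ago" entry at 15:37:15,173 above shows the client-side RpcRetryingCallerImpl absorbing each RegionTooBusyException and retrying the put with backoff rather than surfacing it to the writer thread. A minimal sketch of an equivalent client write under standard retry settings; the configuration values, row, and column below are illustrative (taken loosely from the keys visible in this log), not the exact settings used by the test.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: a single put that the client retries internally when the region
// answers with RegionTooBusyException, as seen in the RpcRetryingCallerImpl entry.
public class RetryingPutSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 16); // max retries per operation (assumed)
    conf.setLong("hbase.client.pause", 100L);       // base backoff in ms (assumed)

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_1"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Retried internally with backoff until the region accepts the write
      // or the retry budget is exhausted.
      table.put(put);
    }
  }
}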
2024-12-10T15:37:16,628 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:16,628 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:16,628 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/A, priority=13, startTime=1733845036203; duration=0sec 2024-12-10T15:37:16,628 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/B, priority=13, startTime=1733845036204; duration=0sec 2024-12-10T15:37:16,628 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:16,628 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:A 2024-12-10T15:37:16,628 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:16,628 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:B 2024-12-10T15:37:16,628 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:37:16,628 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:37:16,628 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 614727b67ed1c48d9acfd143d8b127a7/C is initiating minor compaction (all files) 2024-12-10T15:37:16,629 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 614727b67ed1c48d9acfd143d8b127a7/C in TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
2024-12-10T15:37:16,629 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/4bd4306df89144dc954f55d6e0983da3, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/4cee7fef733e4ca9b89365b161013b7a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/c5d17a8e5c74450da53f60e24b5819b2] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp, totalSize=37.4 K 2024-12-10T15:37:16,629 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4bd4306df89144dc954f55d6e0983da3, keycount=150, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=579, earliestPutTs=1733845033485 2024-12-10T15:37:16,629 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4cee7fef733e4ca9b89365b161013b7a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=603, earliestPutTs=1733845034117 2024-12-10T15:37:16,629 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting c5d17a8e5c74450da53f60e24b5819b2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=619, earliestPutTs=1733845034242 2024-12-10T15:37:16,634 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 614727b67ed1c48d9acfd143d8b127a7#C#compaction#325 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:16,635 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/05a38f3685d543cfa218bfd9f9361bfa is 50, key is test_row_0/C:col10/1733845035361/Put/seqid=0 2024-12-10T15:37:16,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742200_1376 (size=13833) 2024-12-10T15:37:17,041 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/05a38f3685d543cfa218bfd9f9361bfa as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/05a38f3685d543cfa218bfd9f9361bfa 2024-12-10T15:37:17,045 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 614727b67ed1c48d9acfd143d8b127a7/C of 614727b67ed1c48d9acfd143d8b127a7 into 05a38f3685d543cfa218bfd9f9361bfa(size=13.5 K), total size for store is 13.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:37:17,045 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 614727b67ed1c48d9acfd143d8b127a7:
2024-12-10T15:37:17,045 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7., storeName=614727b67ed1c48d9acfd143d8b127a7/C, priority=13, startTime=1733845036204; duration=0sec
2024-12-10T15:37:17,045 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-10T15:37:17,045 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 614727b67ed1c48d9acfd143d8b127a7:C
2024-12-10T15:37:18,997 DEBUG [Thread-1107 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d296fed to 127.0.0.1:56346
2024-12-10T15:37:18,997 DEBUG [Thread-1107 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-10T15:37:19,184 DEBUG [Thread-1115 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5886c0f2 to 127.0.0.1:56346
2024-12-10T15:37:19,184 DEBUG [Thread-1115 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-10T15:37:19,185 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers:
2024-12-10T15:37:19,185 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 40
2024-12-10T15:37:19,185 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 131
2024-12-10T15:37:19,185 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 110
2024-12-10T15:37:19,185 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 108
2024-12-10T15:37:19,185 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 99
2024-12-10T15:37:19,185 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-12-10T15:37:19,185 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4887
2024-12-10T15:37:19,185 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4901
2024-12-10T15:37:19,185 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4817
2024-12-10T15:37:19,185 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4856
2024-12-10T15:37:19,185 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4880
2024-12-10T15:37:19,185 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-12-10T15:37:19,185 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-10T15:37:19,185 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6862e3ce to 127.0.0.1:56346
2024-12-10T15:37:19,185 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-10T15:37:19,185 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-12-10T15:37:19,185 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-12-10T15:37:19,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-12-10T15:37:19,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85
2024-12-10T15:37:19,187 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733845039187"}]},"ts":"1733845039187"}
2024-12-10T15:37:19,188 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-12-10T15:37:19,212 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-12-10T15:37:19,213 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-12-10T15:37:19,214 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=87, ppid=86, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=614727b67ed1c48d9acfd143d8b127a7, UNASSIGN}]
2024-12-10T15:37:19,215 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=87, ppid=86, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=614727b67ed1c48d9acfd143d8b127a7, UNASSIGN
2024-12-10T15:37:19,215 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=87 updating hbase:meta row=614727b67ed1c48d9acfd143d8b127a7, regionState=CLOSING, regionLocation=bf0fec90ff6d,46239,1733844953049
2024-12-10T15:37:19,216 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false
2024-12-10T15:37:19,216 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; CloseRegionProcedure 614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049}]
2024-12-10T15:37:19,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85
2024-12-10T15:37:19,366 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049
2024-12-10T15:37:19,366 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] handler.UnassignRegionHandler(124): Close 614727b67ed1c48d9acfd143d8b127a7
2024-12-10T15:37:19,366 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false
2024-12-10T15:37:19,366 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegion(1681): Closing 614727b67ed1c48d9acfd143d8b127a7, disabling compactions & flushes
2024-12-10T15:37:19,367 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.
2024-12-10T15:37:19,367 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.
2024-12-10T15:37:19,367 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. after waiting 0 ms 2024-12-10T15:37:19,367 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 2024-12-10T15:37:19,367 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegion(2837): Flushing 614727b67ed1c48d9acfd143d8b127a7 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-10T15:37:19,367 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=A 2024-12-10T15:37:19,367 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:19,367 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=B 2024-12-10T15:37:19,367 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:19,367 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 614727b67ed1c48d9acfd143d8b127a7, store=C 2024-12-10T15:37:19,367 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:19,371 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/947a716fb54c4882af59468af5bad4ef is 50, key is test_row_0/A:col10/1733845035374/Put/seqid=0 2024-12-10T15:37:19,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742201_1377 (size=12301) 2024-12-10T15:37:19,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-10T15:37:19,774 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=629 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/947a716fb54c4882af59468af5bad4ef 2024-12-10T15:37:19,779 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/acfa871692ab4f08add4a9ad2f5dd15d is 50, 
key is test_row_0/B:col10/1733845035374/Put/seqid=0 2024-12-10T15:37:19,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742202_1378 (size=12301) 2024-12-10T15:37:19,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-10T15:37:20,185 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=629 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/acfa871692ab4f08add4a9ad2f5dd15d 2024-12-10T15:37:20,191 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/d76f20d0c0254d88a1bc259a7b9bc8ea is 50, key is test_row_0/C:col10/1733845035374/Put/seqid=0 2024-12-10T15:37:20,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742203_1379 (size=12301) 2024-12-10T15:37:20,245 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-10T15:37:20,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-10T15:37:20,597 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=629 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/d76f20d0c0254d88a1bc259a7b9bc8ea 2024-12-10T15:37:20,602 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/A/947a716fb54c4882af59468af5bad4ef as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/947a716fb54c4882af59468af5bad4ef 2024-12-10T15:37:20,606 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/947a716fb54c4882af59468af5bad4ef, entries=150, sequenceid=629, filesize=12.0 K 2024-12-10T15:37:20,606 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/B/acfa871692ab4f08add4a9ad2f5dd15d as 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/acfa871692ab4f08add4a9ad2f5dd15d 2024-12-10T15:37:20,612 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/acfa871692ab4f08add4a9ad2f5dd15d, entries=150, sequenceid=629, filesize=12.0 K 2024-12-10T15:37:20,613 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/.tmp/C/d76f20d0c0254d88a1bc259a7b9bc8ea as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/d76f20d0c0254d88a1bc259a7b9bc8ea 2024-12-10T15:37:20,617 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/d76f20d0c0254d88a1bc259a7b9bc8ea, entries=150, sequenceid=629, filesize=12.0 K 2024-12-10T15:37:20,617 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 614727b67ed1c48d9acfd143d8b127a7 in 1250ms, sequenceid=629, compaction requested=false 2024-12-10T15:37:20,618 DEBUG [StoreCloser-TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/2f2b9765944b4f86abdc76f0a6efcc7f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/006a285487ed446e889df267e84413da, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/59304ddb278d4a27bc0dedabef31064a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/61538ec85bd942e29b56aa426aef718c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/ceaa648f99194f499d464373824f43a6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/854dde02391b4d9f90192b053ae7f59f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/9cc500ab9fe64c89913f7d81e51d311d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/aaa1ec7648fb45be94c25f794581c223, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/37865321957c4838bf4b6db6355cb1d1, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/be3d5a7751bf49cf917d116c75606da2, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/b02136ab569e4e9e82826a07bfa1a033, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/82c12cb5b15a463e828bd9b0452e1d45, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/7790bac6faa44985b61675e3d6556144, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/bdbb7f1cb2ca45328d2fc1aec4ea263b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/db0e906b9171480dbb2b362c24a8672c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/43656bf744c84a72b870556eef5b2732, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/e344aeb622ba4f16b7fad6ac63b86291, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/eeede55931bc4846bb14611e72c9ce80, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/8172eaf28e95466fbbc4325c80357180, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/0c559606029c41ab8250acc80b246c34, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/7ed7f3285cd046c686e0d32641ac5fc8, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/4be977ac07af4777a7d303997903550f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/d3024a1315e942a4bd3c28663404b80c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/53d8faa8b3e5438186328f9ae6aa7755, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/132dee1b1f854ea58f34e4027009bb05, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/dca428075704477ca5963e958a177f7b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/e782899e1b8b41c6b800d47b815d0df4, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/2be23c52f5524385806e9a4498d2a26a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/72b8e805e4074079ae5d4830002b7744, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/8fd8c33ea12c4820826c68fbb94037f9, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/db8de1758b1e4d89ba3fe1905a075c17, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/f4f91b8cc1bc410ea0e902a6f1ae9df9, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/db0a9d0f11ee49839b8ec03a537388b5, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/a7edfdc3a50e49d8b75cf1853ff2e471, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/1b886799c61f47fb9605ffa5d62ae9f7, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/0e9eebb8e9d7477f89c3d691e3d2e98d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/a0dd8f9edb734ac9ae9eb7753b8ccbbc, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/219296f23ee341da9135b760f1e25bc6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/102cc7303b2a4c298bbfa0c9a54a90a2, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/87ce91966b73469fb3f52f00b4501832, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/421c93f006094425ba1d9f73ec4b5663, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/b5dd0f37f2c8458db0533fb98b8d0174, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/1fd986ea5342420d82c4f87ffef40e23, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/f27a3a94354d4ae0ab44a83cb8d2048a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/19c102dd4bfe4b938e6d6114fda4d8ee] to archive 2024-12-10T15:37:20,624 DEBUG [StoreCloser-TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-10T15:37:20,626 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/2f2b9765944b4f86abdc76f0a6efcc7f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/2f2b9765944b4f86abdc76f0a6efcc7f 2024-12-10T15:37:20,627 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/006a285487ed446e889df267e84413da to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/006a285487ed446e889df267e84413da 2024-12-10T15:37:20,635 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/59304ddb278d4a27bc0dedabef31064a to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/59304ddb278d4a27bc0dedabef31064a 2024-12-10T15:37:20,635 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/61538ec85bd942e29b56aa426aef718c to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/61538ec85bd942e29b56aa426aef718c 2024-12-10T15:37:20,635 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/854dde02391b4d9f90192b053ae7f59f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/854dde02391b4d9f90192b053ae7f59f 2024-12-10T15:37:20,635 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/ceaa648f99194f499d464373824f43a6 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/ceaa648f99194f499d464373824f43a6 2024-12-10T15:37:20,637 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/aaa1ec7648fb45be94c25f794581c223 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/aaa1ec7648fb45be94c25f794581c223 2024-12-10T15:37:20,637 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/37865321957c4838bf4b6db6355cb1d1 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/37865321957c4838bf4b6db6355cb1d1 2024-12-10T15:37:20,638 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/be3d5a7751bf49cf917d116c75606da2 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/be3d5a7751bf49cf917d116c75606da2 2024-12-10T15:37:20,639 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/b02136ab569e4e9e82826a07bfa1a033 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/b02136ab569e4e9e82826a07bfa1a033 2024-12-10T15:37:20,640 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/7790bac6faa44985b61675e3d6556144 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/7790bac6faa44985b61675e3d6556144 2024-12-10T15:37:20,640 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/82c12cb5b15a463e828bd9b0452e1d45 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/82c12cb5b15a463e828bd9b0452e1d45 2024-12-10T15:37:20,644 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/db0e906b9171480dbb2b362c24a8672c to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/db0e906b9171480dbb2b362c24a8672c 2024-12-10T15:37:20,644 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/43656bf744c84a72b870556eef5b2732 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/43656bf744c84a72b870556eef5b2732 2024-12-10T15:37:20,644 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/bdbb7f1cb2ca45328d2fc1aec4ea263b to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/bdbb7f1cb2ca45328d2fc1aec4ea263b 2024-12-10T15:37:20,647 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/8172eaf28e95466fbbc4325c80357180 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/8172eaf28e95466fbbc4325c80357180 2024-12-10T15:37:20,647 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/0c559606029c41ab8250acc80b246c34 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/0c559606029c41ab8250acc80b246c34 2024-12-10T15:37:20,647 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/7ed7f3285cd046c686e0d32641ac5fc8 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/7ed7f3285cd046c686e0d32641ac5fc8 2024-12-10T15:37:20,647 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/eeede55931bc4846bb14611e72c9ce80 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/eeede55931bc4846bb14611e72c9ce80 2024-12-10T15:37:20,648 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/4be977ac07af4777a7d303997903550f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/4be977ac07af4777a7d303997903550f 2024-12-10T15:37:20,649 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/9cc500ab9fe64c89913f7d81e51d311d to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/9cc500ab9fe64c89913f7d81e51d311d 2024-12-10T15:37:20,649 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/d3024a1315e942a4bd3c28663404b80c to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/d3024a1315e942a4bd3c28663404b80c 2024-12-10T15:37:20,655 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/e344aeb622ba4f16b7fad6ac63b86291 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/e344aeb622ba4f16b7fad6ac63b86291 2024-12-10T15:37:20,656 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/dca428075704477ca5963e958a177f7b to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/dca428075704477ca5963e958a177f7b 2024-12-10T15:37:20,656 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/132dee1b1f854ea58f34e4027009bb05 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/132dee1b1f854ea58f34e4027009bb05 2024-12-10T15:37:20,656 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/53d8faa8b3e5438186328f9ae6aa7755 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/53d8faa8b3e5438186328f9ae6aa7755 2024-12-10T15:37:20,656 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/e782899e1b8b41c6b800d47b815d0df4 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/e782899e1b8b41c6b800d47b815d0df4 2024-12-10T15:37:20,657 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/2be23c52f5524385806e9a4498d2a26a to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/2be23c52f5524385806e9a4498d2a26a 2024-12-10T15:37:20,657 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/8fd8c33ea12c4820826c68fbb94037f9 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/8fd8c33ea12c4820826c68fbb94037f9 2024-12-10T15:37:20,667 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/db8de1758b1e4d89ba3fe1905a075c17 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/db8de1758b1e4d89ba3fe1905a075c17 2024-12-10T15:37:20,667 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/f4f91b8cc1bc410ea0e902a6f1ae9df9 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/f4f91b8cc1bc410ea0e902a6f1ae9df9 2024-12-10T15:37:20,667 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/db0a9d0f11ee49839b8ec03a537388b5 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/db0a9d0f11ee49839b8ec03a537388b5 2024-12-10T15:37:20,667 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/1b886799c61f47fb9605ffa5d62ae9f7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/1b886799c61f47fb9605ffa5d62ae9f7 2024-12-10T15:37:20,667 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/a7edfdc3a50e49d8b75cf1853ff2e471 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/a7edfdc3a50e49d8b75cf1853ff2e471 2024-12-10T15:37:20,669 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/0e9eebb8e9d7477f89c3d691e3d2e98d to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/0e9eebb8e9d7477f89c3d691e3d2e98d 2024-12-10T15:37:20,669 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/a0dd8f9edb734ac9ae9eb7753b8ccbbc to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/a0dd8f9edb734ac9ae9eb7753b8ccbbc 2024-12-10T15:37:20,671 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/72b8e805e4074079ae5d4830002b7744 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/72b8e805e4074079ae5d4830002b7744 2024-12-10T15:37:20,677 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/421c93f006094425ba1d9f73ec4b5663 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/421c93f006094425ba1d9f73ec4b5663 2024-12-10T15:37:20,677 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/219296f23ee341da9135b760f1e25bc6 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/219296f23ee341da9135b760f1e25bc6 2024-12-10T15:37:20,677 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/87ce91966b73469fb3f52f00b4501832 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/87ce91966b73469fb3f52f00b4501832 2024-12-10T15:37:20,677 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/102cc7303b2a4c298bbfa0c9a54a90a2 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/102cc7303b2a4c298bbfa0c9a54a90a2 2024-12-10T15:37:20,683 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/1fd986ea5342420d82c4f87ffef40e23 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/1fd986ea5342420d82c4f87ffef40e23 2024-12-10T15:37:20,683 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/f27a3a94354d4ae0ab44a83cb8d2048a to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/f27a3a94354d4ae0ab44a83cb8d2048a 2024-12-10T15:37:20,684 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/b5dd0f37f2c8458db0533fb98b8d0174 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/b5dd0f37f2c8458db0533fb98b8d0174 2024-12-10T15:37:20,684 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/19c102dd4bfe4b938e6d6114fda4d8ee to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/19c102dd4bfe4b938e6d6114fda4d8ee 2024-12-10T15:37:20,686 DEBUG [StoreCloser-TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/3e145ea3130246438c50ac752797bd20, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/f18c2a6344ab4141b7441bce96dc4588, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/9364ae9a176a49ac9a13a42e73e7196a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/d68a9133e7d340548a812d05c513dce5, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/095b34fbc6a34196b2f45ffb1cd367a1, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/de80b671530a4e408426cf0069d623a8, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/68fd6e4c146d4ac580252d608821885f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/2e46d7b8be8f469fb1c6340baee6416b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/95ac9a6335c54c678fbc4a0738791994, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/e355b45e94424d0faa5f4b3a72732faa, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/1bf99a1018744290adbc24085ae32dba, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/3c52a50e9e9b479fb0b5198aebc162dd, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/c895b77702e1412f88e895918d7fd017, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/09f41356b7024061a3646360ee556066, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/c76c41e72026487ebbf953fe57e99149, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/3452520c3e4541529c283d75403e8a38, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/6c7e44693875471faa3d3711c8aad67a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/42530832dbcc4d9a87d8297bca32d6f8, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/1e57a8f0494a46cb9d290a47590ef0d1, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/1738f86653fa4aa0a20da42b41bf23bf, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/4d30c3dcc02244149e523e630681674a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/8f601810acf04e8290b2847a07e9032d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/e1c30c585b2846acad712328a01b5b23, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/d7549b8348b24bc5837fcd830562c109, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/c60f6dcfd4b0465190209fd5f2725168, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/156d697297d24cd6a4c109e7d94bb7c3, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/b6c18ab0c88148cda5c5ff6e13dbb90b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/62217a231143457f9d529b2d92003fb7, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/21558744c2ad4c5da3424c53c3a6366c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/e3d6e27879b64ad88abf4ea0ecbc3f95, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/1c887f89856648e180c767bfe367f39e, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/fc2ce3ab21314035976860c9e0118650, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/3715bd856c87422db4cb696f78bee49d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/803df6a1bc4347abb98b2b77e817ee15, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/6bcbc5b2af6d4de9b3e29fd5dd35ac6d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/aeff1e7df2e840bd9d885284cf650680, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/f7c19e80c44d4f50aecad66ae059f43d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/c6b09813d0e942059a4a3728f125db80, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/1d23e76b5d85421c92f4dbf0aaa6c27c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/166029f73de84cd3bedc322156015025, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/91fdd4f17d2e4bc38d57e55e2faecb47, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/37e9011a5e4443e2ac7a934d85e5db53, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/33cbd2e196a541d99934fa0f93b9ebee, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/ac4d6a984db04bbea42153f887bc1bbb] to archive 2024-12-10T15:37:20,687 DEBUG [StoreCloser-TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-10T15:37:20,703 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/3e145ea3130246438c50ac752797bd20 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/3e145ea3130246438c50ac752797bd20 2024-12-10T15:37:20,703 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/d68a9133e7d340548a812d05c513dce5 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/d68a9133e7d340548a812d05c513dce5 2024-12-10T15:37:20,703 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/de80b671530a4e408426cf0069d623a8 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/de80b671530a4e408426cf0069d623a8 2024-12-10T15:37:20,703 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/095b34fbc6a34196b2f45ffb1cd367a1 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/095b34fbc6a34196b2f45ffb1cd367a1 2024-12-10T15:37:20,703 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/2e46d7b8be8f469fb1c6340baee6416b to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/2e46d7b8be8f469fb1c6340baee6416b 2024-12-10T15:37:20,703 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/68fd6e4c146d4ac580252d608821885f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/68fd6e4c146d4ac580252d608821885f 2024-12-10T15:37:20,703 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/f18c2a6344ab4141b7441bce96dc4588 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/f18c2a6344ab4141b7441bce96dc4588 2024-12-10T15:37:20,704 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/9364ae9a176a49ac9a13a42e73e7196a to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/9364ae9a176a49ac9a13a42e73e7196a 2024-12-10T15:37:20,711 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/e355b45e94424d0faa5f4b3a72732faa to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/e355b45e94424d0faa5f4b3a72732faa 2024-12-10T15:37:20,711 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/3c52a50e9e9b479fb0b5198aebc162dd to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/3c52a50e9e9b479fb0b5198aebc162dd 2024-12-10T15:37:20,711 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/95ac9a6335c54c678fbc4a0738791994 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/95ac9a6335c54c678fbc4a0738791994 2024-12-10T15:37:20,711 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/09f41356b7024061a3646360ee556066 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/09f41356b7024061a3646360ee556066 2024-12-10T15:37:20,711 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/1bf99a1018744290adbc24085ae32dba to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/1bf99a1018744290adbc24085ae32dba 2024-12-10T15:37:20,711 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/c895b77702e1412f88e895918d7fd017 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/c895b77702e1412f88e895918d7fd017 2024-12-10T15:37:20,712 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/c76c41e72026487ebbf953fe57e99149 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/c76c41e72026487ebbf953fe57e99149 2024-12-10T15:37:20,713 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/1e57a8f0494a46cb9d290a47590ef0d1 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/1e57a8f0494a46cb9d290a47590ef0d1 2024-12-10T15:37:20,714 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/6c7e44693875471faa3d3711c8aad67a to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/6c7e44693875471faa3d3711c8aad67a 2024-12-10T15:37:20,714 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/3452520c3e4541529c283d75403e8a38 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/3452520c3e4541529c283d75403e8a38 2024-12-10T15:37:20,714 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/42530832dbcc4d9a87d8297bca32d6f8 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/42530832dbcc4d9a87d8297bca32d6f8 2024-12-10T15:37:20,714 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/1738f86653fa4aa0a20da42b41bf23bf to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/1738f86653fa4aa0a20da42b41bf23bf 2024-12-10T15:37:20,714 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/8f601810acf04e8290b2847a07e9032d to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/8f601810acf04e8290b2847a07e9032d 2024-12-10T15:37:20,719 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/4d30c3dcc02244149e523e630681674a to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/4d30c3dcc02244149e523e630681674a 2024-12-10T15:37:20,721 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/b6c18ab0c88148cda5c5ff6e13dbb90b to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/b6c18ab0c88148cda5c5ff6e13dbb90b 2024-12-10T15:37:20,722 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/62217a231143457f9d529b2d92003fb7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/62217a231143457f9d529b2d92003fb7 2024-12-10T15:37:20,722 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/c60f6dcfd4b0465190209fd5f2725168 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/c60f6dcfd4b0465190209fd5f2725168 2024-12-10T15:37:20,722 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/e1c30c585b2846acad712328a01b5b23 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/e1c30c585b2846acad712328a01b5b23 2024-12-10T15:37:20,729 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/21558744c2ad4c5da3424c53c3a6366c to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/21558744c2ad4c5da3424c53c3a6366c 2024-12-10T15:37:20,730 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/d7549b8348b24bc5837fcd830562c109 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/d7549b8348b24bc5837fcd830562c109 2024-12-10T15:37:20,732 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/e3d6e27879b64ad88abf4ea0ecbc3f95 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/e3d6e27879b64ad88abf4ea0ecbc3f95 2024-12-10T15:37:20,733 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/1c887f89856648e180c767bfe367f39e to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/1c887f89856648e180c767bfe367f39e 2024-12-10T15:37:20,734 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/6bcbc5b2af6d4de9b3e29fd5dd35ac6d to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/6bcbc5b2af6d4de9b3e29fd5dd35ac6d 2024-12-10T15:37:20,734 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/3715bd856c87422db4cb696f78bee49d to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/3715bd856c87422db4cb696f78bee49d 2024-12-10T15:37:20,735 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/fc2ce3ab21314035976860c9e0118650 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/fc2ce3ab21314035976860c9e0118650 2024-12-10T15:37:20,735 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/156d697297d24cd6a4c109e7d94bb7c3 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/156d697297d24cd6a4c109e7d94bb7c3 2024-12-10T15:37:20,735 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/c6b09813d0e942059a4a3728f125db80 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/c6b09813d0e942059a4a3728f125db80 2024-12-10T15:37:20,736 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/aeff1e7df2e840bd9d885284cf650680 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/aeff1e7df2e840bd9d885284cf650680 2024-12-10T15:37:20,736 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/f7c19e80c44d4f50aecad66ae059f43d to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/f7c19e80c44d4f50aecad66ae059f43d 2024-12-10T15:37:20,739 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/1d23e76b5d85421c92f4dbf0aaa6c27c to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/1d23e76b5d85421c92f4dbf0aaa6c27c 2024-12-10T15:37:20,739 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/91fdd4f17d2e4bc38d57e55e2faecb47 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/91fdd4f17d2e4bc38d57e55e2faecb47 2024-12-10T15:37:20,739 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/803df6a1bc4347abb98b2b77e817ee15 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/803df6a1bc4347abb98b2b77e817ee15 2024-12-10T15:37:20,739 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/33cbd2e196a541d99934fa0f93b9ebee to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/33cbd2e196a541d99934fa0f93b9ebee 2024-12-10T15:37:20,739 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/ac4d6a984db04bbea42153f887bc1bbb to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/ac4d6a984db04bbea42153f887bc1bbb 2024-12-10T15:37:20,739 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/166029f73de84cd3bedc322156015025 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/166029f73de84cd3bedc322156015025 2024-12-10T15:37:20,740 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/37e9011a5e4443e2ac7a934d85e5db53 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/37e9011a5e4443e2ac7a934d85e5db53 2024-12-10T15:37:20,744 DEBUG [StoreCloser-TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/bbd530eb12d544768f84d53b23713bf6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/f0c50c30dd8a4798915552841789771b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/10a6e43d41d74f5f9d36e847c453602b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/7e5bad6e79d042789ce04f08e31e7dc9, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/da554007c3584eba9b6bdeadeb6f2fab, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/e55ef35ea0b54f029596fe18b30685df, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/125ea3cd8c454eee8061a374a46dba2b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/bcc3d40b5af94cf48861476fa1b13307, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/23a01f11915c44e9a12a12cffe03b6e8, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/c125d7f832204a0fba7901e7c66be45c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/0d7ce1626e9842e9964b5ac2cb256d1a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/c8787b81890848499dc9a83534a5e2bd, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/e15935728da546aa83be23d8db6bd1b5, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/cca64d76de49403c84597a8c98025274, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/23f9cf36b14645aabac352c637505f90, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/bba8d9ab8fd04a0b92394be3bd007a47, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/7b1325cdcc9e4ea4b9896d5985de1bd2, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/c3b7ceaf1dd44c6eaed01a9ae5c180c2, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/80874c7492024df2852e30b9c51a8eea, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/116febc7ff1f4f4498e9975d83f6888e, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/66899716a987450ebc731a63c6576e28, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/2bdc0d51d6184b578c5fcbd06c169d14, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/5e2be05aac8e4e7db135e36dc8078bca, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/0456cc7a48fb40bd9a0f0133a35bca28, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/bbed9d4f347345dc9651c33e04a584ac, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/b1db8eb97929460a9a7baf148661fcd9, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/a77f0fc85d89451a9b16c2cc271d1b7b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/a7d3f76f49944758870921edc5be295c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/8aea3608beee47d2824694548bfdc634, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/447d0e6e60d3486dbc0a6239e72dfdb3, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/9d4ff4d22c034db28a30906b9c800c32, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/3fc7e8cecfd94a0ab622b6ddd9e1d1f5, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/7593da9d52c84c269e62f6eaab2a31e6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/34bd2b40d5b2431a885dc1ca3df19384, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/11da7521b8f04ed6a54c524ba8f5537d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/316a60ddabb64f26a664ef65437c66d6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/b49aeacb823a4c45b97fc134f63b7d65, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/c1f2147fb0844ffabb52c77212790d13, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/9e4d40262eb54c109e223c09bd5ee8be, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/541870b332e54b1782ceccf0c3e30bea, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/66229a2652ad45ddab3e70ab087e8571, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/4bd4306df89144dc954f55d6e0983da3, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/69d0c821583c455d866d6d0e34134afb, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/4cee7fef733e4ca9b89365b161013b7a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/c5d17a8e5c74450da53f60e24b5819b2] to archive 2024-12-10T15:37:20,747 DEBUG [StoreCloser-TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
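Note on the archiver records above (family B) and below (family C): each one moves a compacted store file from the region's data directory to the same relative path under the cluster's archive root. The following is a minimal, purely illustrative Java sketch of that path mapping, using one path taken from these records; it is not HBase's HFileArchiver implementation.

// Illustrative only: mirrors the data/ -> archive/data/ mapping visible in the
// HFileArchiver records above; this is NOT the actual HBase implementation.
public final class ArchivePathSketch {

    /** Rewrites .../data/default/<table>/<region>/<cf>/<file> to the mirrored archive path. */
    static String toArchivePath(String storeFilePath) {
        int idx = storeFilePath.indexOf("/data/default/");
        if (idx < 0) {
            throw new IllegalArgumentException("Not a store file path: " + storeFilePath);
        }
        // Insert the "archive" component in front of the data directory.
        return storeFilePath.substring(0, idx) + "/archive" + storeFilePath.substring(idx);
    }

    public static void main(String[] args) {
        String src = "hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935"
            + "/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/bbd530eb12d544768f84d53b23713bf6";
        System.out.println(src + " -> " + toArchivePath(src));
    }
}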
2024-12-10T15:37:20,755 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/f0c50c30dd8a4798915552841789771b to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/f0c50c30dd8a4798915552841789771b 2024-12-10T15:37:20,755 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/bbd530eb12d544768f84d53b23713bf6 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/bbd530eb12d544768f84d53b23713bf6 2024-12-10T15:37:20,755 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/10a6e43d41d74f5f9d36e847c453602b to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/10a6e43d41d74f5f9d36e847c453602b 2024-12-10T15:37:20,756 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/da554007c3584eba9b6bdeadeb6f2fab to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/da554007c3584eba9b6bdeadeb6f2fab 2024-12-10T15:37:20,756 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/e55ef35ea0b54f029596fe18b30685df to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/e55ef35ea0b54f029596fe18b30685df 2024-12-10T15:37:20,756 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/7e5bad6e79d042789ce04f08e31e7dc9 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/7e5bad6e79d042789ce04f08e31e7dc9 2024-12-10T15:37:20,757 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/125ea3cd8c454eee8061a374a46dba2b to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/125ea3cd8c454eee8061a374a46dba2b 2024-12-10T15:37:20,758 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/bcc3d40b5af94cf48861476fa1b13307 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/bcc3d40b5af94cf48861476fa1b13307 2024-12-10T15:37:20,759 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/23a01f11915c44e9a12a12cffe03b6e8 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/23a01f11915c44e9a12a12cffe03b6e8 2024-12-10T15:37:20,759 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/c125d7f832204a0fba7901e7c66be45c to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/c125d7f832204a0fba7901e7c66be45c 2024-12-10T15:37:20,761 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/e15935728da546aa83be23d8db6bd1b5 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/e15935728da546aa83be23d8db6bd1b5 2024-12-10T15:37:20,761 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/0d7ce1626e9842e9964b5ac2cb256d1a to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/0d7ce1626e9842e9964b5ac2cb256d1a 2024-12-10T15:37:20,761 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/cca64d76de49403c84597a8c98025274 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/cca64d76de49403c84597a8c98025274 2024-12-10T15:37:20,762 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/c8787b81890848499dc9a83534a5e2bd to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/c8787b81890848499dc9a83534a5e2bd 2024-12-10T15:37:20,762 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/23f9cf36b14645aabac352c637505f90 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/23f9cf36b14645aabac352c637505f90 2024-12-10T15:37:20,762 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/7b1325cdcc9e4ea4b9896d5985de1bd2 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/7b1325cdcc9e4ea4b9896d5985de1bd2 2024-12-10T15:37:20,762 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/bba8d9ab8fd04a0b92394be3bd007a47 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/bba8d9ab8fd04a0b92394be3bd007a47 2024-12-10T15:37:20,768 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/c3b7ceaf1dd44c6eaed01a9ae5c180c2 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/c3b7ceaf1dd44c6eaed01a9ae5c180c2 2024-12-10T15:37:20,768 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/2bdc0d51d6184b578c5fcbd06c169d14 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/2bdc0d51d6184b578c5fcbd06c169d14 2024-12-10T15:37:20,768 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/116febc7ff1f4f4498e9975d83f6888e to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/116febc7ff1f4f4498e9975d83f6888e 2024-12-10T15:37:20,768 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/5e2be05aac8e4e7db135e36dc8078bca to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/5e2be05aac8e4e7db135e36dc8078bca 2024-12-10T15:37:20,769 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/80874c7492024df2852e30b9c51a8eea to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/80874c7492024df2852e30b9c51a8eea 2024-12-10T15:37:20,769 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/bbed9d4f347345dc9651c33e04a584ac to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/bbed9d4f347345dc9651c33e04a584ac 2024-12-10T15:37:20,770 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/0456cc7a48fb40bd9a0f0133a35bca28 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/0456cc7a48fb40bd9a0f0133a35bca28 2024-12-10T15:37:20,770 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/66899716a987450ebc731a63c6576e28 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/66899716a987450ebc731a63c6576e28 2024-12-10T15:37:20,771 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/a7d3f76f49944758870921edc5be295c to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/a7d3f76f49944758870921edc5be295c 2024-12-10T15:37:20,771 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/a77f0fc85d89451a9b16c2cc271d1b7b to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/a77f0fc85d89451a9b16c2cc271d1b7b 2024-12-10T15:37:20,771 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/b1db8eb97929460a9a7baf148661fcd9 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/b1db8eb97929460a9a7baf148661fcd9 2024-12-10T15:37:20,771 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/8aea3608beee47d2824694548bfdc634 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/8aea3608beee47d2824694548bfdc634 2024-12-10T15:37:20,772 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/447d0e6e60d3486dbc0a6239e72dfdb3 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/447d0e6e60d3486dbc0a6239e72dfdb3 2024-12-10T15:37:20,773 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/3fc7e8cecfd94a0ab622b6ddd9e1d1f5 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/3fc7e8cecfd94a0ab622b6ddd9e1d1f5 2024-12-10T15:37:20,773 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/9d4ff4d22c034db28a30906b9c800c32 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/9d4ff4d22c034db28a30906b9c800c32 2024-12-10T15:37:20,774 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/11da7521b8f04ed6a54c524ba8f5537d to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/11da7521b8f04ed6a54c524ba8f5537d 2024-12-10T15:37:20,774 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/34bd2b40d5b2431a885dc1ca3df19384 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/34bd2b40d5b2431a885dc1ca3df19384 2024-12-10T15:37:20,774 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/316a60ddabb64f26a664ef65437c66d6 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/316a60ddabb64f26a664ef65437c66d6 2024-12-10T15:37:20,774 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/c1f2147fb0844ffabb52c77212790d13 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/c1f2147fb0844ffabb52c77212790d13 2024-12-10T15:37:20,774 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/7593da9d52c84c269e62f6eaab2a31e6 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/7593da9d52c84c269e62f6eaab2a31e6 2024-12-10T15:37:20,774 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/b49aeacb823a4c45b97fc134f63b7d65 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/b49aeacb823a4c45b97fc134f63b7d65 2024-12-10T15:37:20,775 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/9e4d40262eb54c109e223c09bd5ee8be to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/9e4d40262eb54c109e223c09bd5ee8be 2024-12-10T15:37:20,775 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/541870b332e54b1782ceccf0c3e30bea to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/541870b332e54b1782ceccf0c3e30bea 2024-12-10T15:37:20,775 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/4bd4306df89144dc954f55d6e0983da3 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/4bd4306df89144dc954f55d6e0983da3 2024-12-10T15:37:20,775 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/66229a2652ad45ddab3e70ab087e8571 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/66229a2652ad45ddab3e70ab087e8571 2024-12-10T15:37:20,776 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/69d0c821583c455d866d6d0e34134afb to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/69d0c821583c455d866d6d0e34134afb 2024-12-10T15:37:20,779 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/4cee7fef733e4ca9b89365b161013b7a to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/4cee7fef733e4ca9b89365b161013b7a 2024-12-10T15:37:20,779 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/c5d17a8e5c74450da53f60e24b5819b2 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/c5d17a8e5c74450da53f60e24b5819b2 2024-12-10T15:37:20,788 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/recovered.edits/632.seqid, newMaxSeqId=632, maxSeqId=1 2024-12-10T15:37:20,789 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7. 
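The records that follow show the close/unassign procedure chain finishing, the table being marked DISABLED in hbase:meta, and the client-side HBaseAdmin future reporting the DISABLE operation complete before a DELETE is requested. On the client, this cleanup corresponds roughly to the standard Admin API calls sketched below (a hedged sketch of the public hbase-client API, not the TestAcidGuarantees harness's actual code).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch of the client-side cleanup that produces the DISABLE/DELETE records below;
// not the test harness itself.
public final class DropTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName tn = TableName.valueOf("TestAcidGuarantees");
            if (admin.tableExists(tn)) {
                if (admin.isTableEnabled(tn)) {
                    admin.disableTable(tn);   // returns once the DisableTableProcedure completes
                }
                admin.deleteTable(tn);        // returns once the DeleteTableProcedure completes
            }
        }
    }
}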
2024-12-10T15:37:20,789 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] regionserver.HRegion(1635): Region close journal for 614727b67ed1c48d9acfd143d8b127a7: 2024-12-10T15:37:20,790 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=88}] handler.UnassignRegionHandler(170): Closed 614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:37:20,791 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=87 updating hbase:meta row=614727b67ed1c48d9acfd143d8b127a7, regionState=CLOSED 2024-12-10T15:37:20,792 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-12-10T15:37:20,792 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; CloseRegionProcedure 614727b67ed1c48d9acfd143d8b127a7, server=bf0fec90ff6d,46239,1733844953049 in 1.5750 sec 2024-12-10T15:37:20,797 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=87, resume processing ppid=86 2024-12-10T15:37:20,797 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, ppid=86, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=614727b67ed1c48d9acfd143d8b127a7, UNASSIGN in 1.5780 sec 2024-12-10T15:37:20,798 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-12-10T15:37:20,798 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5840 sec 2024-12-10T15:37:20,799 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733845040799"}]},"ts":"1733845040799"} 2024-12-10T15:37:20,800 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-10T15:37:20,829 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-10T15:37:20,836 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.6450 sec 2024-12-10T15:37:21,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-10T15:37:21,291 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-12-10T15:37:21,291 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-10T15:37:21,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:37:21,292 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=89, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:37:21,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-10T15:37:21,293 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for 
pid=89, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:37:21,294 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:37:21,295 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A, FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B, FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C, FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/recovered.edits] 2024-12-10T15:37:21,297 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/6d4b9cbd43b8475b9a2c78f7b680ecec to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/6d4b9cbd43b8475b9a2c78f7b680ecec 2024-12-10T15:37:21,297 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/947a716fb54c4882af59468af5bad4ef to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/A/947a716fb54c4882af59468af5bad4ef 2024-12-10T15:37:21,299 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/709e1f1c199d4740b40af7210e378819 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/709e1f1c199d4740b40af7210e378819 2024-12-10T15:37:21,299 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/acfa871692ab4f08add4a9ad2f5dd15d to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/B/acfa871692ab4f08add4a9ad2f5dd15d 2024-12-10T15:37:21,300 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/05a38f3685d543cfa218bfd9f9361bfa to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/05a38f3685d543cfa218bfd9f9361bfa 2024-12-10T15:37:21,301 
DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/d76f20d0c0254d88a1bc259a7b9bc8ea to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/C/d76f20d0c0254d88a1bc259a7b9bc8ea 2024-12-10T15:37:21,302 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/recovered.edits/632.seqid to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7/recovered.edits/632.seqid 2024-12-10T15:37:21,303 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/614727b67ed1c48d9acfd143d8b127a7 2024-12-10T15:37:21,303 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-10T15:37:21,304 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=89, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:37:21,306 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-10T15:37:21,308 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-10T15:37:21,308 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=89, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:37:21,308 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-10T15:37:21,309 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733845041308"}]},"ts":"9223372036854775807"} 2024-12-10T15:37:21,310 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-10T15:37:21,310 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 614727b67ed1c48d9acfd143d8b127a7, NAME => 'TestAcidGuarantees,,1733845012358.614727b67ed1c48d9acfd143d8b127a7.', STARTKEY => '', ENDKEY => ''}] 2024-12-10T15:37:21,310 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 
2024-12-10T15:37:21,310 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733845041310"}]},"ts":"9223372036854775807"} 2024-12-10T15:37:21,312 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-10T15:37:21,371 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=89, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:37:21,372 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 80 msec 2024-12-10T15:37:21,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-10T15:37:21,394 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-12-10T15:37:21,401 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=245 (was 248), OpenFileDescriptor=457 (was 470), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1076 (was 1191), ProcessCount=11 (was 11), AvailableMemoryMB=1899 (was 1942) 2024-12-10T15:37:21,410 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=245, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=1076, ProcessCount=11, AvailableMemoryMB=1899 2024-12-10T15:37:21,411 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
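[editor's note] The DELETE operation above (procId 89) is the server side of a plain Admin-API teardown between test runs, and the TableDescriptorChecker warning that follows it shows the next test deliberately running with a very small memstore flush size (131072 bytes) so flushes happen constantly. A minimal sketch of the client-side teardown, assuming the standard HBase 2.x Admin API; configuration and class names here are illustrative and not taken from the test source:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestAcidGuarantees");
          if (admin.tableExists(tn)) {
            // A table must be disabled before DeleteTableProcedure will accept it.
            if (admin.isTableEnabled(tn)) {
              admin.disableTable(tn);
            }
            // Blocks until the master's DeleteTableProcedure (the pid=89 steps above:
            // clear the FS layout, archive HFiles, remove META rows, drop the descriptor)
            // reports completion.
            admin.deleteTable(tn);
          }
        }
      }
    }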
2024-12-10T15:37:21,411 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T15:37:21,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=90, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-10T15:37:21,412 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=90, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T15:37:21,412 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:21,413 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=90, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T15:37:21,413 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 90 2024-12-10T15:37:21,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=90 2024-12-10T15:37:21,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742204_1380 (size=963) 2024-12-10T15:37:21,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=90 2024-12-10T15:37:21,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=90 2024-12-10T15:37:21,818 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935 2024-12-10T15:37:21,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742205_1381 (size=53) 2024-12-10T15:37:22,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=90 2024-12-10T15:37:22,232 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T15:37:22,232 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing cd3195b888bdde70b3568541344b4bc7, disabling compactions & flushes 2024-12-10T15:37:22,232 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:22,232 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:22,232 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. after waiting 0 ms 2024-12-10T15:37:22,232 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:22,232 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
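[editor's note] The create request logged above recreates TestAcidGuarantees with an ADAPTIVE compacting memstore selected through table-level metadata and three single-version column families A, B and C. A rough client-side equivalent, assuming the HBase 2.x TableDescriptorBuilder/ColumnFamilyDescriptorBuilder API (the class and variable names are illustrative, not the test's own):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAcidTable {
      // 'admin' would be obtained from a Connection as in the previous sketch.
      static void create(Admin admin) throws IOException {
        TableDescriptorBuilder builder = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // Table-level metadata key shown in TABLE_ATTRIBUTES above.
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
        for (String family : new String[] { "A", "B", "C" }) {
          builder.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)   // VERSIONS => '1' in the logged descriptor
              .build());
        }
        admin.createTable(builder.build());  // drives CreateTableProcedure (pid=90)
      }
    }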
2024-12-10T15:37:22,232 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:22,235 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=90, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T15:37:22,235 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733845042235"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733845042235"}]},"ts":"1733845042235"} 2024-12-10T15:37:22,244 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-10T15:37:22,244 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=90, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T15:37:22,245 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733845042244"}]},"ts":"1733845042244"} 2024-12-10T15:37:22,246 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-10T15:37:22,295 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=cd3195b888bdde70b3568541344b4bc7, ASSIGN}] 2024-12-10T15:37:22,303 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=cd3195b888bdde70b3568541344b4bc7, ASSIGN 2024-12-10T15:37:22,304 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=cd3195b888bdde70b3568541344b4bc7, ASSIGN; state=OFFLINE, location=bf0fec90ff6d,46239,1733844953049; forceNewPlan=false, retain=false 2024-12-10T15:37:22,416 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-10T15:37:22,454 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=91 updating hbase:meta row=cd3195b888bdde70b3568541344b4bc7, regionState=OPENING, regionLocation=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:22,456 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE; OpenRegionProcedure cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049}] 2024-12-10T15:37:22,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=90 2024-12-10T15:37:22,607 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:22,610 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] handler.AssignRegionHandler(135): 
Open TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:22,611 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] regionserver.HRegion(7285): Opening region: {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} 2024-12-10T15:37:22,611 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:22,611 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T15:37:22,611 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] regionserver.HRegion(7327): checking encryption for cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:22,611 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] regionserver.HRegion(7330): checking classloading for cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:22,614 INFO [StoreOpener-cd3195b888bdde70b3568541344b4bc7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:22,620 INFO [StoreOpener-cd3195b888bdde70b3568541344b4bc7-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T15:37:22,621 INFO [StoreOpener-cd3195b888bdde70b3568541344b4bc7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cd3195b888bdde70b3568541344b4bc7 columnFamilyName A 2024-12-10T15:37:22,621 DEBUG [StoreOpener-cd3195b888bdde70b3568541344b4bc7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:22,623 INFO [StoreOpener-cd3195b888bdde70b3568541344b4bc7-1 {}] regionserver.HStore(327): Store=cd3195b888bdde70b3568541344b4bc7/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T15:37:22,623 INFO [StoreOpener-cd3195b888bdde70b3568541344b4bc7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:22,630 INFO [StoreOpener-cd3195b888bdde70b3568541344b4bc7-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T15:37:22,630 INFO [StoreOpener-cd3195b888bdde70b3568541344b4bc7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cd3195b888bdde70b3568541344b4bc7 columnFamilyName B 2024-12-10T15:37:22,630 DEBUG [StoreOpener-cd3195b888bdde70b3568541344b4bc7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:22,634 INFO [StoreOpener-cd3195b888bdde70b3568541344b4bc7-1 {}] regionserver.HStore(327): Store=cd3195b888bdde70b3568541344b4bc7/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T15:37:22,635 INFO [StoreOpener-cd3195b888bdde70b3568541344b4bc7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:22,644 INFO [StoreOpener-cd3195b888bdde70b3568541344b4bc7-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T15:37:22,644 INFO [StoreOpener-cd3195b888bdde70b3568541344b4bc7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cd3195b888bdde70b3568541344b4bc7 columnFamilyName C 2024-12-10T15:37:22,644 DEBUG [StoreOpener-cd3195b888bdde70b3568541344b4bc7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:22,647 INFO [StoreOpener-cd3195b888bdde70b3568541344b4bc7-1 {}] 
regionserver.HStore(327): Store=cd3195b888bdde70b3568541344b4bc7/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T15:37:22,648 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:22,648 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:22,649 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:22,652 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-10T15:37:22,653 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] regionserver.HRegion(1085): writing seq id for cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:22,655 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T15:37:22,663 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] regionserver.HRegion(1102): Opened cd3195b888bdde70b3568541344b4bc7; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68128325, jitterRate=0.015191152691841125}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T15:37:22,664 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] regionserver.HRegion(1001): Region open journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:22,672 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7., pid=92, masterSystemTime=1733845042607 2024-12-10T15:37:22,680 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:22,680 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=92}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
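[editor's note] The store openers above show each family backed by a CompactingMemStore with compactor=ADAPTIVE, which this table selects through the 'hbase.hregion.compacting.memstore.type' metadata key. For reference only, the same policy can be requested per column family through the descriptor API; a hedged sketch assuming HBase 2.x ColumnFamilyDescriptorBuilder and the MemoryCompactionPolicy enum (not how this test configures it):

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AdaptiveFamilySketch {
      // Per-family alternative to the table-level metadata key: request ADAPTIVE
      // in-memory compaction for family A only (NONE, BASIC and EAGER are the others).
      static ColumnFamilyDescriptor adaptiveFamilyA() {
        return ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("A"))
            .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
            .build();
      }
    }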
2024-12-10T15:37:22,691 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=91 updating hbase:meta row=cd3195b888bdde70b3568541344b4bc7, regionState=OPEN, openSeqNum=2, regionLocation=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:22,711 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-12-10T15:37:22,711 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; OpenRegionProcedure cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 in 243 msec 2024-12-10T15:37:22,713 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=91, resume processing ppid=90 2024-12-10T15:37:22,713 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, ppid=90, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=cd3195b888bdde70b3568541344b4bc7, ASSIGN in 416 msec 2024-12-10T15:37:22,714 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=90, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T15:37:22,714 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733845042714"}]},"ts":"1733845042714"} 2024-12-10T15:37:22,716 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-10T15:37:22,742 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=90, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T15:37:22,745 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.3310 sec 2024-12-10T15:37:23,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=90 2024-12-10T15:37:23,527 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 90 completed 2024-12-10T15:37:23,528 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x75b14fbd to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7b6cf8cb 2024-12-10T15:37:23,596 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72f422b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:37:23,598 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:37:23,599 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35008, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:37:23,600 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-10T15:37:23,601 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56100, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-10T15:37:23,602 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-10T15:37:23,602 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T15:37:23,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=93, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-10T15:37:23,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742206_1382 (size=999) 2024-12-10T15:37:24,013 DEBUG [PEWorker-4 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-10T15:37:24,013 INFO [PEWorker-4 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-10T15:37:24,016 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-10T15:37:24,017 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=cd3195b888bdde70b3568541344b4bc7, REOPEN/MOVE}] 2024-12-10T15:37:24,018 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=cd3195b888bdde70b3568541344b4bc7, REOPEN/MOVE 2024-12-10T15:37:24,019 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=cd3195b888bdde70b3568541344b4bc7, regionState=CLOSING, regionLocation=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:24,020 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T15:37:24,020 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE; CloseRegionProcedure cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049}] 2024-12-10T15:37:24,171 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:24,172 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(124): Close cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:24,172 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T15:37:24,172 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1681): Closing cd3195b888bdde70b3568541344b4bc7, disabling compactions & flushes 2024-12-10T15:37:24,172 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:24,172 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:24,172 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. after waiting 0 ms 2024-12-10T15:37:24,172 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
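[editor's note] The modify request above (pid=93) changes only family A, enabling MOB storage with a 4-byte threshold so that nearly every value the test writes is stored as a MOB cell; the master then closes and reopens the region (continuing below) to apply the new descriptor. A minimal client-side sketch of that change, assuming the HBase 2.x Admin/TableDescriptorBuilder API with illustrative names:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EnableMobOnFamilyA {
      static void enableMob(Admin admin) throws IOException {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        TableDescriptor current = admin.getDescriptor(tn);
        // Rebuild family A with IS_MOB => 'true' and MOB_THRESHOLD => '4',
        // matching the target descriptor in the modify request above.
        ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder
            .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
            .setMobEnabled(true)
            .setMobThreshold(4L)
            .build();
        // modifyTable drives ModifyTableProcedure (pid=93) and the
        // ReopenTableRegionsProcedure that follows it in the log.
        admin.modifyTable(TableDescriptorBuilder.newBuilder(current)
            .modifyColumnFamily(mobA)
            .build());
      }
    }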
2024-12-10T15:37:24,176 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-10T15:37:24,176 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:24,176 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1635): Region close journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:24,176 WARN [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionServer(3786): Not adding moved region record: cd3195b888bdde70b3568541344b4bc7 to self. 2024-12-10T15:37:24,178 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(170): Closed cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:24,178 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=cd3195b888bdde70b3568541344b4bc7, regionState=CLOSED 2024-12-10T15:37:24,185 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-12-10T15:37:24,185 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; CloseRegionProcedure cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 in 159 msec 2024-12-10T15:37:24,185 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=cd3195b888bdde70b3568541344b4bc7, REOPEN/MOVE; state=CLOSED, location=bf0fec90ff6d,46239,1733844953049; forceNewPlan=false, retain=true 2024-12-10T15:37:24,336 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=cd3195b888bdde70b3568541344b4bc7, regionState=OPENING, regionLocation=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:24,337 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=95, state=RUNNABLE; OpenRegionProcedure cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049}] 2024-12-10T15:37:24,489 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:24,491 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
2024-12-10T15:37:24,491 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(7285): Opening region: {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} 2024-12-10T15:37:24,492 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:24,492 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T15:37:24,492 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(7327): checking encryption for cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:24,492 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(7330): checking classloading for cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:24,493 INFO [StoreOpener-cd3195b888bdde70b3568541344b4bc7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:24,493 INFO [StoreOpener-cd3195b888bdde70b3568541344b4bc7-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T15:37:24,493 INFO [StoreOpener-cd3195b888bdde70b3568541344b4bc7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cd3195b888bdde70b3568541344b4bc7 columnFamilyName A 2024-12-10T15:37:24,494 DEBUG [StoreOpener-cd3195b888bdde70b3568541344b4bc7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:24,495 INFO [StoreOpener-cd3195b888bdde70b3568541344b4bc7-1 {}] regionserver.HStore(327): Store=cd3195b888bdde70b3568541344b4bc7/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T15:37:24,495 INFO [StoreOpener-cd3195b888bdde70b3568541344b4bc7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:24,495 INFO [StoreOpener-cd3195b888bdde70b3568541344b4bc7-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T15:37:24,495 INFO [StoreOpener-cd3195b888bdde70b3568541344b4bc7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cd3195b888bdde70b3568541344b4bc7 columnFamilyName B 2024-12-10T15:37:24,495 DEBUG [StoreOpener-cd3195b888bdde70b3568541344b4bc7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:24,496 INFO [StoreOpener-cd3195b888bdde70b3568541344b4bc7-1 {}] regionserver.HStore(327): Store=cd3195b888bdde70b3568541344b4bc7/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T15:37:24,496 INFO [StoreOpener-cd3195b888bdde70b3568541344b4bc7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:24,496 INFO [StoreOpener-cd3195b888bdde70b3568541344b4bc7-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T15:37:24,496 INFO [StoreOpener-cd3195b888bdde70b3568541344b4bc7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cd3195b888bdde70b3568541344b4bc7 columnFamilyName C 2024-12-10T15:37:24,496 DEBUG [StoreOpener-cd3195b888bdde70b3568541344b4bc7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:24,497 INFO [StoreOpener-cd3195b888bdde70b3568541344b4bc7-1 {}] regionserver.HStore(327): Store=cd3195b888bdde70b3568541344b4bc7/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T15:37:24,497 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:24,497 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:24,498 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:24,499 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-10T15:37:24,500 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1085): writing seq id for cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:24,501 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1102): Opened cd3195b888bdde70b3568541344b4bc7; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72194436, jitterRate=0.07578092813491821}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T15:37:24,502 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1001): Region open journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:24,502 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7., pid=97, masterSystemTime=1733845044488 2024-12-10T15:37:24,504 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:24,504 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
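[editor's note] With the region reopened under the MOB-enabled descriptor, the reader and writer threads behind the client connections set up below exercise the table with ordinary Puts and Scans; MOB storage is transparent to the client API, and the flush request and RegionTooBusyException retries further down are the expected consequence of the deliberately small memstore limits. A hedged sketch of that access pattern, with illustrative row, qualifier and value names not taken from the test:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobReadWriteSketch {
      static void writeThenScan(Connection conn) throws IOException {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        try (Table table = conn.getTable(tn)) {
          // Any value longer than the 4-byte MOB threshold configured above is
          // written to a MOB file rather than an ordinary store file.
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col0"),
              Bytes.toBytes("value-longer-than-4-bytes"));
          table.put(put);

          // Scans read MOB and non-MOB cells the same way; the test compares
          // values across families A, B and C to check atomicity.
          try (ResultScanner scanner = table.getScanner(new Scan())) {
            for (Result r : scanner) {
              byte[] value = r.getValue(Bytes.toBytes("A"), Bytes.toBytes("col0"));
            }
          }
        }
      }
    }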
2024-12-10T15:37:24,504 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=cd3195b888bdde70b3568541344b4bc7, regionState=OPEN, openSeqNum=5, regionLocation=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:24,506 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=95 2024-12-10T15:37:24,506 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=95, state=SUCCESS; OpenRegionProcedure cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 in 168 msec 2024-12-10T15:37:24,507 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=94 2024-12-10T15:37:24,507 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=94, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=cd3195b888bdde70b3568541344b4bc7, REOPEN/MOVE in 489 msec 2024-12-10T15:37:24,508 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-12-10T15:37:24,508 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 492 msec 2024-12-10T15:37:24,509 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 906 msec 2024-12-10T15:37:24,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-10T15:37:24,511 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x62f74604 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7ec15031 2024-12-10T15:37:24,545 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2df33cdf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:37:24,546 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x49e13594 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3dd5b441 2024-12-10T15:37:24,554 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9f472e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:37:24,555 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c54a0d3 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c336ea4 2024-12-10T15:37:24,563 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@167a78b0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:37:24,564 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3875c8c5 to 
127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1f94d721 2024-12-10T15:37:24,571 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5aee939b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:37:24,571 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0801ba40 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@319559be 2024-12-10T15:37:24,579 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f49665c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:37:24,580 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c907e21 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@683f8469 2024-12-10T15:37:24,587 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6584e9ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:37:24,588 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x61ec0f48 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@75e4d3d0 2024-12-10T15:37:24,596 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37ec8e3b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:37:24,597 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7819b9e2 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2b308f62 2024-12-10T15:37:24,604 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@787e5169, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:37:24,605 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x47679076 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@68035c67 2024-12-10T15:37:24,613 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@627cad17, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:37:24,613 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x4cb9e50e to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3eab689a 2024-12-10T15:37:24,621 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39387e4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:37:24,627 DEBUG [hconnection-0x129a88c5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:37:24,628 DEBUG [hconnection-0xea6e281-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:37:24,629 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35012, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:37:24,631 DEBUG [hconnection-0x316bfe46-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:37:24,632 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35026, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:37:24,632 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35032, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:37:24,640 DEBUG [hconnection-0x3e15cf92-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:37:24,640 DEBUG [hconnection-0xacb6a4d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:37:24,641 DEBUG [hconnection-0x4001c522-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:37:24,642 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35060, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:37:24,643 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35044, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:37:24,643 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35042, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:37:24,644 DEBUG [hconnection-0x7582ce47-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:37:24,645 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35070, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:37:24,648 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:37:24,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=98, state=RUNNABLE:FLUSH_TABLE_PREPARE; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees 2024-12-10T15:37:24,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:24,649 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cd3195b888bdde70b3568541344b4bc7 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-10T15:37:24,649 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=98, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:37:24,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=A 2024-12-10T15:37:24,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-10T15:37:24,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:24,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=B 2024-12-10T15:37:24,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:24,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=C 2024-12-10T15:37:24,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:24,652 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=98, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:37:24,652 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:37:24,653 DEBUG [hconnection-0x18aa68d5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:37:24,654 DEBUG [hconnection-0x41e60d61-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:37:24,654 DEBUG [hconnection-0x75e7f6bd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:37:24,655 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35084, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:37:24,655 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35082, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:37:24,661 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:24,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845104660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:24,663 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35096, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:37:24,664 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:24,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845104661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:24,664 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:24,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35012 deadline: 1733845104661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:24,665 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:24,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35082 deadline: 1733845104661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:24,665 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:24,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35044 deadline: 1733845104661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:24,684 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210acb134ca6158453ea2f706dabebe1df1_cd3195b888bdde70b3568541344b4bc7 is 50, key is test_row_0/A:col10/1733845044641/Put/seqid=0 2024-12-10T15:37:24,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742207_1383 (size=12154) 2024-12-10T15:37:24,689 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:24,693 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210acb134ca6158453ea2f706dabebe1df1_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210acb134ca6158453ea2f706dabebe1df1_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:24,693 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/4d361a4415a34e0fbe4b25ba381fc22b, store: [table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:24,694 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/4d361a4415a34e0fbe4b25ba381fc22b is 175, key is test_row_0/A:col10/1733845044641/Put/seqid=0 2024-12-10T15:37:24,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742208_1384 (size=30955) 2024-12-10T15:37:24,711 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/4d361a4415a34e0fbe4b25ba381fc22b 2024-12-10T15:37:24,738 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/be88d19f48c3438a8ec3de17ea8efc33 is 50, key is test_row_0/B:col10/1733845044641/Put/seqid=0 2024-12-10T15:37:24,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-10T15:37:24,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:24,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845104765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:24,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742209_1385 (size=12001) 2024-12-10T15:37:24,767 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/be88d19f48c3438a8ec3de17ea8efc33 2024-12-10T15:37:24,768 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:24,768 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:24,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35082 deadline: 1733845104767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:24,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845104767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:24,768 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:24,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35044 deadline: 1733845104768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:24,771 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:24,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35012 deadline: 1733845104771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:24,799 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/a476be899d9c4eb78bf568cc0a129dd4 is 50, key is test_row_0/C:col10/1733845044641/Put/seqid=0 2024-12-10T15:37:24,804 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:24,805 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-10T15:37:24,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:24,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:24,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
2024-12-10T15:37:24,805 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:24,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:24,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:24,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742210_1386 (size=12001) 2024-12-10T15:37:24,825 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/a476be899d9c4eb78bf568cc0a129dd4 2024-12-10T15:37:24,829 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/4d361a4415a34e0fbe4b25ba381fc22b as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/4d361a4415a34e0fbe4b25ba381fc22b 2024-12-10T15:37:24,833 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/4d361a4415a34e0fbe4b25ba381fc22b, entries=150, sequenceid=17, filesize=30.2 K 2024-12-10T15:37:24,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/be88d19f48c3438a8ec3de17ea8efc33 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/be88d19f48c3438a8ec3de17ea8efc33 2024-12-10T15:37:24,839 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/be88d19f48c3438a8ec3de17ea8efc33, entries=150, sequenceid=17, filesize=11.7 K 2024-12-10T15:37:24,841 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/a476be899d9c4eb78bf568cc0a129dd4 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/a476be899d9c4eb78bf568cc0a129dd4 2024-12-10T15:37:24,845 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/a476be899d9c4eb78bf568cc0a129dd4, entries=150, sequenceid=17, filesize=11.7 K 2024-12-10T15:37:24,847 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=147.60 KB/151140 for cd3195b888bdde70b3568541344b4bc7 in 197ms, sequenceid=17, compaction requested=false 2024-12-10T15:37:24,847 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:24,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-10T15:37:24,962 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:24,962 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-10T15:37:24,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
2024-12-10T15:37:24,962 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2837): Flushing cd3195b888bdde70b3568541344b4bc7 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-10T15:37:24,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=A 2024-12-10T15:37:24,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:24,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=B 2024-12-10T15:37:24,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:24,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=C 2024-12-10T15:37:24,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:24,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:24,970 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:24,984 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:24,984 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:24,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35012 deadline: 1733845104979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:24,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845104980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:24,984 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:24,984 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:24,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845104981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:24,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35044 deadline: 1733845104984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:24,985 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:24,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35082 deadline: 1733845104985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:24,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210eb7dc945e8d242f7b1a8e05c69e9159d_cd3195b888bdde70b3568541344b4bc7 is 50, key is test_row_0/A:col10/1733845044660/Put/seqid=0 2024-12-10T15:37:25,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742211_1387 (size=12154) 2024-12-10T15:37:25,089 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:25,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35044 deadline: 1733845105088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:25,089 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:25,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35012 deadline: 1733845105087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:25,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:25,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35082 deadline: 1733845105091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:25,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-10T15:37:25,288 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:25,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845105287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:25,295 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:25,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845105290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:25,295 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:25,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35044 deadline: 1733845105293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:25,297 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:25,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35012 deadline: 1733845105295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:25,309 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:25,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35082 deadline: 1733845105308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:25,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:25,448 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210eb7dc945e8d242f7b1a8e05c69e9159d_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210eb7dc945e8d242f7b1a8e05c69e9159d_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:25,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/e8374c8ab6e545d0a116689ccbfb9141, store: [table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:25,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/e8374c8ab6e545d0a116689ccbfb9141 is 175, key is test_row_0/A:col10/1733845044660/Put/seqid=0 2024-12-10T15:37:25,479 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742212_1388 (size=30955) 2024-12-10T15:37:25,606 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:25,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35012 deadline: 1733845105602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:25,606 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:25,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35044 deadline: 1733845105602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:25,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:25,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35082 deadline: 1733845105623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:25,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-10T15:37:25,802 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:25,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845105800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:25,808 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:25,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845105803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:25,879 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/e8374c8ab6e545d0a116689ccbfb9141 2024-12-10T15:37:25,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/7bc34745f8084a72bdbdf8db2216af88 is 50, key is test_row_0/B:col10/1733845044660/Put/seqid=0 2024-12-10T15:37:25,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742213_1389 (size=12001) 2024-12-10T15:37:26,120 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:26,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35044 deadline: 1733845106115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:26,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:26,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35012 deadline: 1733845106123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:26,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:26,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35082 deadline: 1733845106130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:26,369 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/7bc34745f8084a72bdbdf8db2216af88 2024-12-10T15:37:26,378 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-10T15:37:26,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/ca14d95568c043dfb1bf282a8687be91 is 50, key is test_row_0/C:col10/1733845044660/Put/seqid=0 2024-12-10T15:37:26,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742214_1390 (size=12001) 2024-12-10T15:37:26,446 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/ca14d95568c043dfb1bf282a8687be91 2024-12-10T15:37:26,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/e8374c8ab6e545d0a116689ccbfb9141 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/e8374c8ab6e545d0a116689ccbfb9141 2024-12-10T15:37:26,512 INFO 
[RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/e8374c8ab6e545d0a116689ccbfb9141, entries=150, sequenceid=42, filesize=30.2 K 2024-12-10T15:37:26,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/7bc34745f8084a72bdbdf8db2216af88 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/7bc34745f8084a72bdbdf8db2216af88 2024-12-10T15:37:26,531 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/7bc34745f8084a72bdbdf8db2216af88, entries=150, sequenceid=42, filesize=11.7 K 2024-12-10T15:37:26,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/ca14d95568c043dfb1bf282a8687be91 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/ca14d95568c043dfb1bf282a8687be91 2024-12-10T15:37:26,551 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/ca14d95568c043dfb1bf282a8687be91, entries=150, sequenceid=42, filesize=11.7 K 2024-12-10T15:37:26,552 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for cd3195b888bdde70b3568541344b4bc7 in 1590ms, sequenceid=42, compaction requested=false 2024-12-10T15:37:26,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2538): Flush status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:26,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
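The repeated RegionTooBusyException entries above come from HRegion.checkResources(), which rejects writes once the region's memstore exceeds its blocking threshold (512.0 K here); the flush that just finished (~147.60 KB in 1590ms) is what relieves that backpressure. The blocking threshold is the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; the test's actual values are not shown in this log, so the figures in the sketch below are assumptions chosen only to reproduce a 512 K limit.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed split: 128 KB flush size * multiplier 4 = the 512 K blocking limit seen in the log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
    System.out.println("blocking memstore limit = " + blocking + " bytes"); // 524288
  }
}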
2024-12-10T15:37:26,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=99 2024-12-10T15:37:26,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=99 2024-12-10T15:37:26,555 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=98 2024-12-10T15:37:26,555 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=98, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9020 sec 2024-12-10T15:37:26,557 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees in 1.9080 sec 2024-12-10T15:37:26,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-10T15:37:26,771 INFO [Thread-1721 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 98 completed 2024-12-10T15:37:26,778 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:37:26,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=100, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees 2024-12-10T15:37:26,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-10T15:37:26,780 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=100, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:37:26,781 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=100, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:37:26,781 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=100, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:37:26,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:26,826 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cd3195b888bdde70b3568541344b4bc7 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-10T15:37:26,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=A 2024-12-10T15:37:26,828 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:26,828 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=B 2024-12-10T15:37:26,828 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
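The FLUSH operation that just completed (procId 98) and the one being stored as pid=100 are driven from the client through HBaseAdmin, which waits on a TableFuture until the FlushTableProcedure finishes. A minimal sketch of that call, with connection setup assumed and the table name taken from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a FlushTableProcedure on the master and waits until it (and its
      // per-region FlushRegionProcedure children) reports completion.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}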
2024-12-10T15:37:26,828 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=C 2024-12-10T15:37:26,828 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:26,845 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210dceb3d8d19924363b1af98155f16c74f_cd3195b888bdde70b3568541344b4bc7 is 50, key is test_row_0/A:col10/1733845046826/Put/seqid=0 2024-12-10T15:37:26,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-10T15:37:26,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742215_1391 (size=17034) 2024-12-10T15:37:26,903 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:26,905 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:26,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845106899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:26,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:26,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845106900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:26,928 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210dceb3d8d19924363b1af98155f16c74f_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210dceb3d8d19924363b1af98155f16c74f_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:26,931 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/90c1ee844dfb4fccadf971c63ec6daa2, store: [table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:26,932 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/90c1ee844dfb4fccadf971c63ec6daa2 is 175, key is test_row_0/A:col10/1733845046826/Put/seqid=0 2024-12-10T15:37:26,933 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:26,935 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-10T15:37:26,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
2024-12-10T15:37:26,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:26,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:26,936 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:26,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:26,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:26,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742216_1392 (size=48139) 2024-12-10T15:37:27,010 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:27,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845107007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:27,020 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:27,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845107016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:27,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-10T15:37:27,093 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:27,100 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-10T15:37:27,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:27,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:27,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:27,102 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:27,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:27,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:27,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:27,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35044 deadline: 1733845107138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:27,144 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:27,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35012 deadline: 1733845107139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:27,147 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:27,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35082 deadline: 1733845107147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:27,227 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:27,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845107223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:27,239 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:27,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845107234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:27,263 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:27,264 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-10T15:37:27,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:27,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:27,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:27,264 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:27,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:27,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:27,357 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=56, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/90c1ee844dfb4fccadf971c63ec6daa2 2024-12-10T15:37:27,372 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/3328861de8d04a1ea02fd54d54f37709 is 50, key is test_row_0/B:col10/1733845046826/Put/seqid=0 2024-12-10T15:37:27,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-10T15:37:27,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742217_1393 (size=12001) 2024-12-10T15:37:27,419 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:27,420 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-10T15:37:27,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:27,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:27,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:27,420 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:27,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:27,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:27,548 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:27,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845107543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:27,548 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:27,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845107544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:27,583 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:27,584 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-10T15:37:27,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:27,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:27,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:27,584 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:27,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:27,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:27,743 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:27,744 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-10T15:37:27,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:27,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:27,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:27,747 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:27,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:27,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:27,795 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/3328861de8d04a1ea02fd54d54f37709 2024-12-10T15:37:27,845 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/e14913ec1a2a440ab472fbdf4f506401 is 50, key is test_row_0/C:col10/1733845046826/Put/seqid=0 2024-12-10T15:37:27,921 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:27,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-10T15:37:27,922 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-10T15:37:27,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:27,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:27,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:27,922 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:27,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:27,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742218_1394 (size=12001) 2024-12-10T15:37:27,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:28,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:28,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845108052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:28,060 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:28,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845108056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:28,079 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:28,079 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-10T15:37:28,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:28,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:28,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:28,080 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:28,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:28,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:28,238 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:28,238 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-10T15:37:28,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:28,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:28,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:28,239 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:28,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:28,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:28,327 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/e14913ec1a2a440ab472fbdf4f506401 2024-12-10T15:37:28,336 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/90c1ee844dfb4fccadf971c63ec6daa2 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/90c1ee844dfb4fccadf971c63ec6daa2 2024-12-10T15:37:28,340 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/90c1ee844dfb4fccadf971c63ec6daa2, entries=250, sequenceid=56, filesize=47.0 K 2024-12-10T15:37:28,341 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/3328861de8d04a1ea02fd54d54f37709 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/3328861de8d04a1ea02fd54d54f37709 2024-12-10T15:37:28,345 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/3328861de8d04a1ea02fd54d54f37709, entries=150, sequenceid=56, filesize=11.7 K 2024-12-10T15:37:28,346 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/e14913ec1a2a440ab472fbdf4f506401 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/e14913ec1a2a440ab472fbdf4f506401 2024-12-10T15:37:28,353 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/e14913ec1a2a440ab472fbdf4f506401, entries=150, sequenceid=56, filesize=11.7 K 2024-12-10T15:37:28,356 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for cd3195b888bdde70b3568541344b4bc7 in 1530ms, sequenceid=56, compaction requested=true 2024-12-10T15:37:28,356 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:28,356 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:37:28,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cd3195b888bdde70b3568541344b4bc7:A, priority=-2147483648, current 
under compaction store size is 1 2024-12-10T15:37:28,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:28,357 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:37:28,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cd3195b888bdde70b3568541344b4bc7:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:37:28,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:28,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cd3195b888bdde70b3568541344b4bc7:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:37:28,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:28,358 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:37:28,358 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110049 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:37:28,358 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): cd3195b888bdde70b3568541344b4bc7/A is initiating minor compaction (all files) 2024-12-10T15:37:28,358 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): cd3195b888bdde70b3568541344b4bc7/B is initiating minor compaction (all files) 2024-12-10T15:37:28,358 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cd3195b888bdde70b3568541344b4bc7/A in TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:28,358 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cd3195b888bdde70b3568541344b4bc7/B in TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
2024-12-10T15:37:28,358 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/be88d19f48c3438a8ec3de17ea8efc33, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/7bc34745f8084a72bdbdf8db2216af88, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/3328861de8d04a1ea02fd54d54f37709] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp, totalSize=35.2 K 2024-12-10T15:37:28,358 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/4d361a4415a34e0fbe4b25ba381fc22b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/e8374c8ab6e545d0a116689ccbfb9141, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/90c1ee844dfb4fccadf971c63ec6daa2] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp, totalSize=107.5 K 2024-12-10T15:37:28,358 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:28,358 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
files: [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/4d361a4415a34e0fbe4b25ba381fc22b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/e8374c8ab6e545d0a116689ccbfb9141, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/90c1ee844dfb4fccadf971c63ec6daa2] 2024-12-10T15:37:28,358 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting be88d19f48c3438a8ec3de17ea8efc33, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1733845044641 2024-12-10T15:37:28,360 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 7bc34745f8084a72bdbdf8db2216af88, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733845044659 2024-12-10T15:37:28,360 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 3328861de8d04a1ea02fd54d54f37709, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733845044973 2024-12-10T15:37:28,363 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4d361a4415a34e0fbe4b25ba381fc22b, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1733845044641 2024-12-10T15:37:28,370 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cd3195b888bdde70b3568541344b4bc7#B#compaction#338 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:28,371 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/23fc5c1f31f34ae19d4ea9d418940999 is 50, key is test_row_0/B:col10/1733845046826/Put/seqid=0 2024-12-10T15:37:28,371 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting e8374c8ab6e545d0a116689ccbfb9141, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733845044659 2024-12-10T15:37:28,375 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 90c1ee844dfb4fccadf971c63ec6daa2, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733845044973 2024-12-10T15:37:28,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742219_1395 (size=12104) 2024-12-10T15:37:28,388 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:28,393 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:28,394 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-10T15:37:28,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
2024-12-10T15:37:28,394 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2837): Flushing cd3195b888bdde70b3568541344b4bc7 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-10T15:37:28,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=A 2024-12-10T15:37:28,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:28,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=B 2024-12-10T15:37:28,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:28,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=C 2024-12-10T15:37:28,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:28,400 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/23fc5c1f31f34ae19d4ea9d418940999 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/23fc5c1f31f34ae19d4ea9d418940999 2024-12-10T15:37:28,408 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121045a85ea0605440f1b52d78fb388d5965_cd3195b888bdde70b3568541344b4bc7 store=[table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:28,409 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121045a85ea0605440f1b52d78fb388d5965_cd3195b888bdde70b3568541344b4bc7, store=[table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:28,410 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121045a85ea0605440f1b52d78fb388d5965_cd3195b888bdde70b3568541344b4bc7 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:28,421 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cd3195b888bdde70b3568541344b4bc7/B of cd3195b888bdde70b3568541344b4bc7 into 23fc5c1f31f34ae19d4ea9d418940999(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:37:28,421 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:28,421 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7., storeName=cd3195b888bdde70b3568541344b4bc7/B, priority=13, startTime=1733845048357; duration=0sec 2024-12-10T15:37:28,421 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:28,421 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cd3195b888bdde70b3568541344b4bc7:B 2024-12-10T15:37:28,421 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:37:28,422 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:37:28,422 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): cd3195b888bdde70b3568541344b4bc7/C is initiating minor compaction (all files) 2024-12-10T15:37:28,422 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cd3195b888bdde70b3568541344b4bc7/C in TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:28,422 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/a476be899d9c4eb78bf568cc0a129dd4, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/ca14d95568c043dfb1bf282a8687be91, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/e14913ec1a2a440ab472fbdf4f506401] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp, totalSize=35.2 K 2024-12-10T15:37:28,423 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting a476be899d9c4eb78bf568cc0a129dd4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1733845044641 2024-12-10T15:37:28,424 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting ca14d95568c043dfb1bf282a8687be91, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733845044659 2024-12-10T15:37:28,424 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting e14913ec1a2a440ab472fbdf4f506401, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733845044973 2024-12-10T15:37:28,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210d661369fcbe640e1bd90e547ffce7ae0_cd3195b888bdde70b3568541344b4bc7 is 50, key is test_row_0/A:col10/1733845046899/Put/seqid=0 2024-12-10T15:37:28,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742220_1396 (size=4469) 2024-12-10T15:37:28,458 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cd3195b888bdde70b3568541344b4bc7#C#compaction#341 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:28,458 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/2dde417be2eb40e9b8e99f1e29782934 is 50, key is test_row_0/C:col10/1733845046826/Put/seqid=0 2024-12-10T15:37:28,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742221_1397 (size=12154) 2024-12-10T15:37:28,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742222_1398 (size=12104) 2024-12-10T15:37:28,866 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cd3195b888bdde70b3568541344b4bc7#A#compaction#339 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:28,866 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/e705c0834bd4407ea0fb056cb1ae402a is 175, key is test_row_0/A:col10/1733845046826/Put/seqid=0 2024-12-10T15:37:28,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:28,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742223_1399 (size=31058) 2024-12-10T15:37:28,908 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210d661369fcbe640e1bd90e547ffce7ae0_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210d661369fcbe640e1bd90e547ffce7ae0_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:28,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/12f0edb366034fa4ae005dfc4f58a7bb, store: [table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:28,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/12f0edb366034fa4ae005dfc4f58a7bb is 175, key is test_row_0/A:col10/1733845046899/Put/seqid=0 2024-12-10T15:37:28,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-10T15:37:28,932 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/e705c0834bd4407ea0fb056cb1ae402a as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/e705c0834bd4407ea0fb056cb1ae402a 2024-12-10T15:37:28,938 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cd3195b888bdde70b3568541344b4bc7/A of cd3195b888bdde70b3568541344b4bc7 into e705c0834bd4407ea0fb056cb1ae402a(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:37:28,938 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:28,938 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7., storeName=cd3195b888bdde70b3568541344b4bc7/A, priority=13, startTime=1733845048356; duration=0sec 2024-12-10T15:37:28,938 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:28,938 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cd3195b888bdde70b3568541344b4bc7:A 2024-12-10T15:37:28,949 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/2dde417be2eb40e9b8e99f1e29782934 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/2dde417be2eb40e9b8e99f1e29782934 2024-12-10T15:37:28,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742224_1400 (size=30955) 2024-12-10T15:37:28,957 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/12f0edb366034fa4ae005dfc4f58a7bb 2024-12-10T15:37:28,961 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cd3195b888bdde70b3568541344b4bc7/C of cd3195b888bdde70b3568541344b4bc7 into 2dde417be2eb40e9b8e99f1e29782934(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:37:28,961 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:28,961 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7., storeName=cd3195b888bdde70b3568541344b4bc7/C, priority=13, startTime=1733845048357; duration=0sec 2024-12-10T15:37:28,961 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:28,961 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cd3195b888bdde70b3568541344b4bc7:C 2024-12-10T15:37:28,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/f3d9814c740e41519372eef1139334e7 is 50, key is test_row_0/B:col10/1733845046899/Put/seqid=0 2024-12-10T15:37:28,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742225_1401 (size=12001) 2024-12-10T15:37:28,992 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/f3d9814c740e41519372eef1139334e7 2024-12-10T15:37:29,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/6884c9c40d7e4e12b3662e855a27a98d is 50, key is test_row_0/C:col10/1733845046899/Put/seqid=0 2024-12-10T15:37:29,071 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:29,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:29,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742226_1402 (size=12001) 2024-12-10T15:37:29,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:29,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845109114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:29,119 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:29,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845109116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:29,153 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:29,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35044 deadline: 1733845109151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:29,154 DEBUG [Thread-1717 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4170 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7., hostname=bf0fec90ff6d,46239,1733844953049, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T15:37:29,161 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:29,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35082 deadline: 1733845109156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:29,162 DEBUG [Thread-1711 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4177 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7., hostname=bf0fec90ff6d,46239,1733844953049, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T15:37:29,173 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:29,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35012 deadline: 1733845109164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:29,174 DEBUG [Thread-1719 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4195 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at 
region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7., hostname=bf0fec90ff6d,46239,1733844953049, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T15:37:29,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:29,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845109223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:29,229 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:29,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845109226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:29,448 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:29,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845109438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:29,449 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:29,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845109440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:29,485 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/6884c9c40d7e4e12b3662e855a27a98d 2024-12-10T15:37:29,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/12f0edb366034fa4ae005dfc4f58a7bb as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/12f0edb366034fa4ae005dfc4f58a7bb 2024-12-10T15:37:29,503 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/12f0edb366034fa4ae005dfc4f58a7bb, entries=150, sequenceid=78, filesize=30.2 K 2024-12-10T15:37:29,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/f3d9814c740e41519372eef1139334e7 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/f3d9814c740e41519372eef1139334e7 2024-12-10T15:37:29,508 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/f3d9814c740e41519372eef1139334e7, entries=150, sequenceid=78, filesize=11.7 K 2024-12-10T15:37:29,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/6884c9c40d7e4e12b3662e855a27a98d as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/6884c9c40d7e4e12b3662e855a27a98d 2024-12-10T15:37:29,513 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/6884c9c40d7e4e12b3662e855a27a98d, entries=150, sequenceid=78, filesize=11.7 K 2024-12-10T15:37:29,514 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for cd3195b888bdde70b3568541344b4bc7 in 1120ms, sequenceid=78, compaction requested=false 2024-12-10T15:37:29,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2538): Flush status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:29,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
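The RegionTooBusyException storm above is HRegion.checkResources refusing writes once the region's memstore passes its blocking size; in stock HBase that blocking size is the memstore flush size multiplied by hbase.hregion.memstore.block.multiplier, so the 512.0 K figure here presumably reflects a deliberately small flush size chosen by the test. A minimal sketch, assuming a plain HBase 2.x client and illustrative sizes (128 KB x 4), of where those two knobs live:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class MemstoreLimitSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Region-server side: writes block once a region's memstore reaches
        // flush.size * block.multiplier (128 KB * 4 = 512 K with these assumed values).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // The flush size can also be pinned per table through its descriptor.
          TableDescriptor current = admin.getDescriptor(TableName.valueOf("TestAcidGuarantees"));
          TableDescriptor tuned = TableDescriptorBuilder.newBuilder(current)
              .setMemStoreFlushSize(128 * 1024L)
              .build();
          admin.modifyTable(tuned);
        }
      }
    }

Raising either value trades fewer RegionTooBusyException rejections for larger flushes, which is exactly the pressure this test keeps the region under.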
2024-12-10T15:37:29,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=101 2024-12-10T15:37:29,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=101 2024-12-10T15:37:29,526 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=100 2024-12-10T15:37:29,526 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=100, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7440 sec 2024-12-10T15:37:29,528 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees in 2.7490 sec 2024-12-10T15:37:29,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:29,755 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cd3195b888bdde70b3568541344b4bc7 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-10T15:37:29,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=A 2024-12-10T15:37:29,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:29,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=B 2024-12-10T15:37:29,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:29,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=C 2024-12-10T15:37:29,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:29,793 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210e38b3e433cb54481b62ba22e4292621c_cd3195b888bdde70b3568541344b4bc7 is 50, key is test_row_0/A:col10/1733845049095/Put/seqid=0 2024-12-10T15:37:29,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742227_1403 (size=14594) 2024-12-10T15:37:29,866 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:29,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845109860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:29,866 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:29,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845109862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:29,973 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:29,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845109971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:29,973 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:29,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845109971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:30,179 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:30,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845110176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:30,180 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:30,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845110177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:30,225 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:30,240 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210e38b3e433cb54481b62ba22e4292621c_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210e38b3e433cb54481b62ba22e4292621c_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:30,243 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/7cd505af8c0f472b8daf0252fc614324, store: [table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:30,244 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/7cd505af8c0f472b8daf0252fc614324 is 175, key is test_row_0/A:col10/1733845049095/Put/seqid=0 2024-12-10T15:37:30,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742228_1404 (size=39549) 2024-12-10T15:37:30,286 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=96, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/7cd505af8c0f472b8daf0252fc614324 2024-12-10T15:37:30,297 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/6eb23344bb3f4534a1fddb3c4a7f3511 is 50, key is test_row_0/B:col10/1733845049095/Put/seqid=0 2024-12-10T15:37:30,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742229_1405 
(size=12001) 2024-12-10T15:37:30,355 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/6eb23344bb3f4534a1fddb3c4a7f3511 2024-12-10T15:37:30,373 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/2525341f90b74aa28e9d24738e09bc87 is 50, key is test_row_0/C:col10/1733845049095/Put/seqid=0 2024-12-10T15:37:30,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742230_1406 (size=12001) 2024-12-10T15:37:30,395 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/2525341f90b74aa28e9d24738e09bc87 2024-12-10T15:37:30,411 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/7cd505af8c0f472b8daf0252fc614324 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/7cd505af8c0f472b8daf0252fc614324 2024-12-10T15:37:30,418 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/7cd505af8c0f472b8daf0252fc614324, entries=200, sequenceid=96, filesize=38.6 K 2024-12-10T15:37:30,419 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/6eb23344bb3f4534a1fddb3c4a7f3511 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/6eb23344bb3f4534a1fddb3c4a7f3511 2024-12-10T15:37:30,428 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/6eb23344bb3f4534a1fddb3c4a7f3511, entries=150, sequenceid=96, filesize=11.7 K 2024-12-10T15:37:30,430 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/2525341f90b74aa28e9d24738e09bc87 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/2525341f90b74aa28e9d24738e09bc87 2024-12-10T15:37:30,441 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/2525341f90b74aa28e9d24738e09bc87, entries=150, sequenceid=96, filesize=11.7 K 2024-12-10T15:37:30,442 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for cd3195b888bdde70b3568541344b4bc7 in 687ms, sequenceid=96, compaction requested=true 2024-12-10T15:37:30,442 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:30,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cd3195b888bdde70b3568541344b4bc7:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:37:30,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:30,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cd3195b888bdde70b3568541344b4bc7:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:37:30,442 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:37:30,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:30,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cd3195b888bdde70b3568541344b4bc7:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:37:30,442 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:37:30,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:30,445 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101562 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:37:30,445 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): cd3195b888bdde70b3568541344b4bc7/A is initiating minor compaction (all files) 2024-12-10T15:37:30,445 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cd3195b888bdde70b3568541344b4bc7/A in TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
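The minor compaction being selected here kicked in because three store files had accumulated for the A family, which meets the default minimum of hbase.hstore.compaction.min (3). The same work can also be requested explicitly; a sketch, assuming the stock Admin API and this test's table name, of asking for a family-level or table-wide compaction instead of waiting for a flush to trigger the selection logged above:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompactionRequestSketch {
      public static void main(String[] args) throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          // Queue a minor compaction of just the A family, analogous to the
          // flush-triggered request above.
          admin.compact(table, Bytes.toBytes("A"));
          // Or ask for every store in the table to be rewritten into a single file.
          admin.majorCompact(table);
        }
      }
    }

Either call only queues the request; as the CompactSplit and ExploringCompactionPolicy lines show, the region server still decides which files actually get merged.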
2024-12-10T15:37:30,445 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/e705c0834bd4407ea0fb056cb1ae402a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/12f0edb366034fa4ae005dfc4f58a7bb, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/7cd505af8c0f472b8daf0252fc614324] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp, totalSize=99.2 K 2024-12-10T15:37:30,445 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:30,445 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. files: [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/e705c0834bd4407ea0fb056cb1ae402a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/12f0edb366034fa4ae005dfc4f58a7bb, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/7cd505af8c0f472b8daf0252fc614324] 2024-12-10T15:37:30,445 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:37:30,445 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): cd3195b888bdde70b3568541344b4bc7/B is initiating minor compaction (all files) 2024-12-10T15:37:30,445 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cd3195b888bdde70b3568541344b4bc7/B in TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
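Family A in this table is MOB-enabled, which is why its flushes write into mobdir/ and why its compactions run through DefaultMobStoreCompactor instead of the plain compactor used for B and C. A sketch, assuming the standard HBase 2.x descriptor builders and an invented table name and threshold, of declaring such a family:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilySketch {
      public static void main(String[] args) throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin()) {
          // Cells in this family larger than the MOB threshold go to separate
          // MOB files under mobdir/ rather than into the regular store files.
          ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("A"))
              .setMobEnabled(true)
              .setMobThreshold(100L) // bytes; illustrative value
              .build();
          TableDescriptor table = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("MobExample")) // hypothetical table
              .setColumnFamily(mobFamily)
              .build();
          admin.createTable(table);
        }
      }
    }

Oversized cells are stored once in a MOB file and only referenced from the regular store file, which is why the MOB compactor can abort its MOB writer when a rewrite finds no MOB cells, as happens later in this log.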
2024-12-10T15:37:30,446 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/23fc5c1f31f34ae19d4ea9d418940999, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/f3d9814c740e41519372eef1139334e7, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/6eb23344bb3f4534a1fddb3c4a7f3511] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp, totalSize=35.3 K 2024-12-10T15:37:30,452 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting e705c0834bd4407ea0fb056cb1ae402a, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733845044973 2024-12-10T15:37:30,452 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 23fc5c1f31f34ae19d4ea9d418940999, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733845044973 2024-12-10T15:37:30,453 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting f3d9814c740e41519372eef1139334e7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733845046887 2024-12-10T15:37:30,453 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 12f0edb366034fa4ae005dfc4f58a7bb, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733845046887 2024-12-10T15:37:30,453 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 6eb23344bb3f4534a1fddb3c4a7f3511, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733845049095 2024-12-10T15:37:30,453 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7cd505af8c0f472b8daf0252fc614324, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733845049095 2024-12-10T15:37:30,464 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cd3195b888bdde70b3568541344b4bc7#B#compaction#347 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:30,464 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/52b12045c76c456c86e45dab17f5aec5 is 50, key is test_row_0/B:col10/1733845049095/Put/seqid=0 2024-12-10T15:37:30,485 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:30,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742231_1407 (size=12207) 2024-12-10T15:37:30,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:30,504 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210500a4efbe6b54f70b2d8cc901573e589_cd3195b888bdde70b3568541344b4bc7 store=[table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:30,506 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210500a4efbe6b54f70b2d8cc901573e589_cd3195b888bdde70b3568541344b4bc7, store=[table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:30,506 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210500a4efbe6b54f70b2d8cc901573e589_cd3195b888bdde70b3568541344b4bc7 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:30,507 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cd3195b888bdde70b3568541344b4bc7 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-10T15:37:30,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=A 2024-12-10T15:37:30,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:30,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=B 2024-12-10T15:37:30,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:30,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=C 2024-12-10T15:37:30,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:30,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742232_1408 (size=4469) 2024-12-10T15:37:30,557 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210a1ea6014df4940778cbf8050b5a0484d_cd3195b888bdde70b3568541344b4bc7 is 50, key is test_row_0/A:col10/1733845050498/Put/seqid=0 2024-12-10T15:37:30,567 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cd3195b888bdde70b3568541344b4bc7#A#compaction#348 average throughput is 0.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:30,568 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/63a6ba9588414943a251bc969baaadde is 175, key is test_row_0/A:col10/1733845049095/Put/seqid=0 2024-12-10T15:37:30,573 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:30,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845110568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:30,574 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:30,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845110569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:30,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742233_1409 (size=12154) 2024-12-10T15:37:30,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742234_1410 (size=31161) 2024-12-10T15:37:30,657 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/63a6ba9588414943a251bc969baaadde as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/63a6ba9588414943a251bc969baaadde 2024-12-10T15:37:30,675 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cd3195b888bdde70b3568541344b4bc7/A of cd3195b888bdde70b3568541344b4bc7 into 63a6ba9588414943a251bc969baaadde(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
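On the client side these RegionTooBusyException responses are retriable: the RpcRetryingCallerImpl line earlier in this log shows the writer on tries=6 of retries=16, backing off between attempts. A minimal sketch, assuming the standard client API and illustrative retry settings, of a put that leans on that machinery and only handles the failure once the retry budget is spent:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryAwarePutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Retry budget consumed by RpcRetryingCallerImpl while the region is blocked.
        conf.setInt("hbase.client.retries.number", 16);  // illustrative
        conf.setLong("hbase.client.pause", 100L);        // base backoff in ms, illustrative

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_1"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          try {
            // Retried internally while the server keeps answering RegionTooBusyException.
            table.put(put);
          } catch (IOException e) {
            // Reached only after the retries are exhausted; back off at the application level.
            System.err.println("put gave up after retries: " + e.getMessage());
          }
        }
      }
    }

The stack traces above show the same path inside the test's AtomicityWriter, where HTable.put ends up in RpcRetryingCallerImpl.callWithRetries.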
2024-12-10T15:37:30,675 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:30,675 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7., storeName=cd3195b888bdde70b3568541344b4bc7/A, priority=13, startTime=1733845050442; duration=0sec 2024-12-10T15:37:30,675 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:30,675 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cd3195b888bdde70b3568541344b4bc7:A 2024-12-10T15:37:30,675 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:37:30,679 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:37:30,679 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): cd3195b888bdde70b3568541344b4bc7/C is initiating minor compaction (all files) 2024-12-10T15:37:30,679 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cd3195b888bdde70b3568541344b4bc7/C in TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:30,680 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/2dde417be2eb40e9b8e99f1e29782934, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/6884c9c40d7e4e12b3662e855a27a98d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/2525341f90b74aa28e9d24738e09bc87] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp, totalSize=35.3 K 2024-12-10T15:37:30,680 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2dde417be2eb40e9b8e99f1e29782934, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733845044973 2024-12-10T15:37:30,680 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6884c9c40d7e4e12b3662e855a27a98d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733845046887 2024-12-10T15:37:30,681 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2525341f90b74aa28e9d24738e09bc87, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733845049095 2024-12-10T15:37:30,689 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): 
Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:30,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845110675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:30,690 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:30,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845110676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:30,742 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cd3195b888bdde70b3568541344b4bc7#C#compaction#350 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:30,742 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/56e2aaca8451416c86832526eea2ddc4 is 50, key is test_row_0/C:col10/1733845049095/Put/seqid=0 2024-12-10T15:37:30,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742235_1411 (size=12207) 2024-12-10T15:37:30,806 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/56e2aaca8451416c86832526eea2ddc4 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/56e2aaca8451416c86832526eea2ddc4 2024-12-10T15:37:30,812 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cd3195b888bdde70b3568541344b4bc7/C of cd3195b888bdde70b3568541344b4bc7 into 56e2aaca8451416c86832526eea2ddc4(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:37:30,812 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:30,812 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7., storeName=cd3195b888bdde70b3568541344b4bc7/C, priority=13, startTime=1733845050442; duration=0sec 2024-12-10T15:37:30,812 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:30,812 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cd3195b888bdde70b3568541344b4bc7:C 2024-12-10T15:37:30,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:30,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845110896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:30,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:30,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845110896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:30,927 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/52b12045c76c456c86e45dab17f5aec5 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/52b12045c76c456c86e45dab17f5aec5 2024-12-10T15:37:30,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-10T15:37:30,932 INFO [Thread-1721 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 100 completed 2024-12-10T15:37:30,944 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:37:30,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees 2024-12-10T15:37:30,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-10T15:37:30,955 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:37:30,960 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cd3195b888bdde70b3568541344b4bc7/B of cd3195b888bdde70b3568541344b4bc7 into 52b12045c76c456c86e45dab17f5aec5(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
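Each rejected Mutate above surfaces to the caller as a RegionTooBusyException. The stock HBase client already retries these internally, so the loop below is only a sketch of what an explicit writer-side backoff could look like when client retries are turned down; the table name, row, and column are taken from the log, while the attempt count and delays are illustrative, not what TestAcidGuarantees actually configures.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

          long backoffMs = 50; // illustrative starting delay, not a value from the log
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put); // rejected with RegionTooBusyException while the memstore is over its blocking limit
              break;
            } catch (RegionTooBusyException e) {
              Thread.sleep(backoffMs); // give the flush/compaction seen in the log time to drain the memstore
              backoffMs *= 2;          // simple exponential backoff between attempts
            }
          }
        }
      }
    }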
2024-12-10T15:37:30,960 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:30,960 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7., storeName=cd3195b888bdde70b3568541344b4bc7/B, priority=13, startTime=1733845050442; duration=0sec 2024-12-10T15:37:30,960 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:30,960 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cd3195b888bdde70b3568541344b4bc7:B 2024-12-10T15:37:30,963 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:37:30,963 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:37:30,992 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:31,015 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210a1ea6014df4940778cbf8050b5a0484d_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210a1ea6014df4940778cbf8050b5a0484d_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:31,019 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/618bf03de2d343a6b830ed119e01f172, store: [table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:31,019 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/618bf03de2d343a6b830ed119e01f172 is 175, key is test_row_0/A:col10/1733845050498/Put/seqid=0 2024-12-10T15:37:31,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-10T15:37:31,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742236_1412 (size=30955) 2024-12-10T15:37:31,072 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=120, memsize=47.0 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/618bf03de2d343a6b830ed119e01f172 2024-12-10T15:37:31,081 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/6646b75cae8e417fb22bc4cca4d5caea is 50, key is test_row_0/B:col10/1733845050498/Put/seqid=0 2024-12-10T15:37:31,137 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:31,137 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-10T15:37:31,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:31,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:31,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:31,137 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:31,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:31,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
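The FlushTableProcedure / FlushRegionProcedure exchange above (pid=102/103) is what the master runs when a client asks for a table flush; the "Unable to complete flush ... as already flushing" IOException is the region server telling the master to retry because a flush of the same region is still in progress, and the HBaseAdmin$TableFuture lines show the client waiting for the procedure to finish. A minimal sketch of issuing such a request from client code, assuming an Admin handle on the same cluster:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Returns once the master-side flush procedure (the pid=10x entries in the log) completes.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }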
2024-12-10T15:37:31,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742237_1413 (size=12001) 2024-12-10T15:37:31,145 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/6646b75cae8e417fb22bc4cca4d5caea 2024-12-10T15:37:31,157 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/84d9484d1d4f4161af17c388968881a9 is 50, key is test_row_0/C:col10/1733845050498/Put/seqid=0 2024-12-10T15:37:31,207 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:31,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845111207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:31,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:31,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845111207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:31,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742238_1414 (size=12001) 2024-12-10T15:37:31,245 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/84d9484d1d4f4161af17c388968881a9 2024-12-10T15:37:31,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-10T15:37:31,258 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/618bf03de2d343a6b830ed119e01f172 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/618bf03de2d343a6b830ed119e01f172 2024-12-10T15:37:31,261 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/618bf03de2d343a6b830ed119e01f172, entries=150, sequenceid=120, filesize=30.2 K 2024-12-10T15:37:31,262 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/6646b75cae8e417fb22bc4cca4d5caea as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/6646b75cae8e417fb22bc4cca4d5caea 2024-12-10T15:37:31,270 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/6646b75cae8e417fb22bc4cca4d5caea, entries=150, sequenceid=120, filesize=11.7 K 2024-12-10T15:37:31,271 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/84d9484d1d4f4161af17c388968881a9 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/84d9484d1d4f4161af17c388968881a9 2024-12-10T15:37:31,276 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/84d9484d1d4f4161af17c388968881a9, entries=150, sequenceid=120, filesize=11.7 K 2024-12-10T15:37:31,277 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for cd3195b888bdde70b3568541344b4bc7 in 770ms, sequenceid=120, compaction requested=false 2024-12-10T15:37:31,277 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:31,299 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:31,300 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-10T15:37:31,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
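The HMobStore and DefaultMobStoreFlusher entries show that column family A of this table is MOB-enabled, so oversized cells are flushed into the separate mobdir tree rather than the regular store files. A sketch of how a table gets a MOB column family like that is below; only family A is shown (the test table also has B and C), and the 100-byte threshold is an assumed illustration since the real threshold is not in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          ColumnFamilyDescriptor familyA = ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("A"))
              .setMobEnabled(true)   // route large cells through the MOB path (the mobdir/... files in the log)
              .setMobThreshold(100L) // assumed threshold in bytes; cells above it are written as MOB cells
              .build();
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestAcidGuarantees"))
              .setColumnFamily(familyA)
              .build());
        }
      }
    }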
2024-12-10T15:37:31,301 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2837): Flushing cd3195b888bdde70b3568541344b4bc7 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-10T15:37:31,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=A 2024-12-10T15:37:31,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:31,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=B 2024-12-10T15:37:31,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:31,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=C 2024-12-10T15:37:31,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:31,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412101b869337e8034224a582bbc237eac74b_cd3195b888bdde70b3568541344b4bc7 is 50, key is test_row_0/A:col10/1733845050557/Put/seqid=0 2024-12-10T15:37:31,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742239_1415 (size=12304) 2024-12-10T15:37:31,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-10T15:37:31,715 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
as already flushing 2024-12-10T15:37:31,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:31,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:31,763 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412101b869337e8034224a582bbc237eac74b_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412101b869337e8034224a582bbc237eac74b_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:31,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/6ae0843f0c9740cd9fbe18655e2a169b, store: [table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:31,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/6ae0843f0c9740cd9fbe18655e2a169b is 175, key is test_row_0/A:col10/1733845050557/Put/seqid=0 2024-12-10T15:37:31,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742240_1416 (size=31105) 2024-12-10T15:37:31,808 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=135, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/6ae0843f0c9740cd9fbe18655e2a169b 2024-12-10T15:37:31,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/5f4fb62d3f534136b4df8b8748aedfd9 is 50, key is test_row_0/B:col10/1733845050557/Put/seqid=0 2024-12-10T15:37:31,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742241_1417 (size=12151) 2024-12-10T15:37:31,892 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=135 (bloomFilter=true), 
to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/5f4fb62d3f534136b4df8b8748aedfd9 2024-12-10T15:37:31,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/67e9f9bbee8949bc926b6161301c0035 is 50, key is test_row_0/C:col10/1733845050557/Put/seqid=0 2024-12-10T15:37:31,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:31,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845111920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:31,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:31,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845111936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:31,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742242_1418 (size=12151) 2024-12-10T15:37:31,982 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/67e9f9bbee8949bc926b6161301c0035 2024-12-10T15:37:31,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/6ae0843f0c9740cd9fbe18655e2a169b as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/6ae0843f0c9740cd9fbe18655e2a169b 2024-12-10T15:37:32,004 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/6ae0843f0c9740cd9fbe18655e2a169b, entries=150, sequenceid=135, filesize=30.4 K 2024-12-10T15:37:32,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/5f4fb62d3f534136b4df8b8748aedfd9 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/5f4fb62d3f534136b4df8b8748aedfd9 2024-12-10T15:37:32,013 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/5f4fb62d3f534136b4df8b8748aedfd9, entries=150, sequenceid=135, filesize=11.9 K 2024-12-10T15:37:32,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 
{event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/67e9f9bbee8949bc926b6161301c0035 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/67e9f9bbee8949bc926b6161301c0035 2024-12-10T15:37:32,024 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/67e9f9bbee8949bc926b6161301c0035, entries=150, sequenceid=135, filesize=11.9 K 2024-12-10T15:37:32,025 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for cd3195b888bdde70b3568541344b4bc7 in 725ms, sequenceid=135, compaction requested=true 2024-12-10T15:37:32,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2538): Flush status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:32,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:32,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-12-10T15:37:32,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=103 2024-12-10T15:37:32,029 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-12-10T15:37:32,029 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0650 sec 2024-12-10T15:37:32,031 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees in 1.0860 sec 2024-12-10T15:37:32,041 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cd3195b888bdde70b3568541344b4bc7 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-10T15:37:32,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=A 2024-12-10T15:37:32,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:32,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=B 2024-12-10T15:37:32,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:32,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=C 2024-12-10T15:37:32,041 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:32,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:32,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-10T15:37:32,056 INFO [Thread-1721 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 102 completed 2024-12-10T15:37:32,060 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:37:32,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees 2024-12-10T15:37:32,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-10T15:37:32,067 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:37:32,072 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:37:32,072 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:37:32,085 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121005b0ece4fd9f478f8449cec934f9cf4b_cd3195b888bdde70b3568541344b4bc7 is 50, key is test_row_0/A:col10/1733845052037/Put/seqid=0 2024-12-10T15:37:32,105 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:32,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845112100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:32,105 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:32,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845112104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:32,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742243_1419 (size=12304) 2024-12-10T15:37:32,130 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:32,135 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121005b0ece4fd9f478f8449cec934f9cf4b_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121005b0ece4fd9f478f8449cec934f9cf4b_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:32,136 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/bf0937f134364a2698b5b9330be28e95, store: [table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:32,137 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/bf0937f134364a2698b5b9330be28e95 is 175, key is test_row_0/A:col10/1733845052037/Put/seqid=0 2024-12-10T15:37:32,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-10T15:37:32,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742244_1420 (size=31105) 2024-12-10T15:37:32,230 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:32,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845112213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:32,230 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:32,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845112213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:32,233 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:32,234 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-10T15:37:32,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:32,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:32,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:32,234 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:32,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:32,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:32,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-10T15:37:32,395 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:32,396 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-10T15:37:32,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:32,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:32,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:32,399 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:32,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:32,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:32,416 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-10T15:37:32,416 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-10T15:37:32,439 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:32,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845112432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:32,446 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:32,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845112438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:32,551 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:32,553 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-10T15:37:32,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:32,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:32,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:32,553 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:32,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:32,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:32,585 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=160, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/bf0937f134364a2698b5b9330be28e95 2024-12-10T15:37:32,612 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/1b27ef32c1f7405587a9e9c18dc016ec is 50, key is test_row_0/B:col10/1733845052037/Put/seqid=0 2024-12-10T15:37:32,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742245_1421 (size=12151) 2024-12-10T15:37:32,657 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/1b27ef32c1f7405587a9e9c18dc016ec 2024-12-10T15:37:32,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-10T15:37:32,677 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/ebe10dd97a4e4bf88428cc39a3f927ed is 50, key is test_row_0/C:col10/1733845052037/Put/seqid=0 2024-12-10T15:37:32,709 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:32,711 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-10T15:37:32,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:32,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:32,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
2024-12-10T15:37:32,712 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:32,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:32,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:32,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742246_1422 (size=12151) 2024-12-10T15:37:32,718 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/ebe10dd97a4e4bf88428cc39a3f927ed 2024-12-10T15:37:32,723 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/bf0937f134364a2698b5b9330be28e95 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/bf0937f134364a2698b5b9330be28e95 2024-12-10T15:37:32,728 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/bf0937f134364a2698b5b9330be28e95, entries=150, sequenceid=160, filesize=30.4 K 2024-12-10T15:37:32,729 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/1b27ef32c1f7405587a9e9c18dc016ec as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/1b27ef32c1f7405587a9e9c18dc016ec 2024-12-10T15:37:32,733 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/1b27ef32c1f7405587a9e9c18dc016ec, entries=150, sequenceid=160, filesize=11.9 K 2024-12-10T15:37:32,734 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/ebe10dd97a4e4bf88428cc39a3f927ed as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/ebe10dd97a4e4bf88428cc39a3f927ed 2024-12-10T15:37:32,744 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/ebe10dd97a4e4bf88428cc39a3f927ed, entries=150, sequenceid=160, filesize=11.9 K 2024-12-10T15:37:32,748 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for cd3195b888bdde70b3568541344b4bc7 in 708ms, sequenceid=160, compaction requested=true 2024-12-10T15:37:32,748 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:32,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cd3195b888bdde70b3568541344b4bc7:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:37:32,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:32,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cd3195b888bdde70b3568541344b4bc7:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:37:32,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-10T15:37:32,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cd3195b888bdde70b3568541344b4bc7:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:37:32,749 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-10T15:37:32,749 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:37:32,750 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 124326 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:37:32,750 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): cd3195b888bdde70b3568541344b4bc7/A is initiating minor compaction (all files) 2024-12-10T15:37:32,750 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cd3195b888bdde70b3568541344b4bc7/A in 
TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:32,751 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/63a6ba9588414943a251bc969baaadde, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/618bf03de2d343a6b830ed119e01f172, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/6ae0843f0c9740cd9fbe18655e2a169b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/bf0937f134364a2698b5b9330be28e95] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp, totalSize=121.4 K 2024-12-10T15:37:32,751 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:32,751 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. files: [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/63a6ba9588414943a251bc969baaadde, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/618bf03de2d343a6b830ed119e01f172, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/6ae0843f0c9740cd9fbe18655e2a169b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/bf0937f134364a2698b5b9330be28e95] 2024-12-10T15:37:32,751 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:37:32,751 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 63a6ba9588414943a251bc969baaadde, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733845049095 2024-12-10T15:37:32,751 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 618bf03de2d343a6b830ed119e01f172, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1733845049766 2024-12-10T15:37:32,752 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6ae0843f0c9740cd9fbe18655e2a169b, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733845050557 2024-12-10T15:37:32,752 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting bf0937f134364a2698b5b9330be28e95, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1733845051906 2024-12-10T15:37:32,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:32,756 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cd3195b888bdde70b3568541344b4bc7 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-10T15:37:32,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=A 2024-12-10T15:37:32,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:32,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=B 2024-12-10T15:37:32,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:32,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=C 2024-12-10T15:37:32,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:32,771 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48510 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:37:32,771 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): cd3195b888bdde70b3568541344b4bc7/C is initiating minor compaction (all files) 2024-12-10T15:37:32,771 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cd3195b888bdde70b3568541344b4bc7/C in TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
2024-12-10T15:37:32,772 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/56e2aaca8451416c86832526eea2ddc4, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/84d9484d1d4f4161af17c388968881a9, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/67e9f9bbee8949bc926b6161301c0035, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/ebe10dd97a4e4bf88428cc39a3f927ed] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp, totalSize=47.4 K 2024-12-10T15:37:32,775 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 56e2aaca8451416c86832526eea2ddc4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733845049095 2024-12-10T15:37:32,777 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:32,779 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 84d9484d1d4f4161af17c388968881a9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1733845049766 2024-12-10T15:37:32,780 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 67e9f9bbee8949bc926b6161301c0035, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733845050557 2024-12-10T15:37:32,787 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting ebe10dd97a4e4bf88428cc39a3f927ed, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1733845051906 2024-12-10T15:37:32,789 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121068fe6dfdf1e344cea91b61c51a5721ed_cd3195b888bdde70b3568541344b4bc7 store=[table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:32,791 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121068fe6dfdf1e344cea91b61c51a5721ed_cd3195b888bdde70b3568541344b4bc7, store=[table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:32,791 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121068fe6dfdf1e344cea91b61c51a5721ed_cd3195b888bdde70b3568541344b4bc7 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 
2024-12-10T15:37:32,834 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210caf0ea1c3b984739973cded055c88d52_cd3195b888bdde70b3568541344b4bc7 is 50, key is test_row_0/A:col10/1733845052754/Put/seqid=0 2024-12-10T15:37:32,866 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:32,867 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-10T15:37:32,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:32,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:32,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:32,871 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:32,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:32,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:32,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742247_1423 (size=4469) 2024-12-10T15:37:32,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742248_1424 (size=14794) 2024-12-10T15:37:32,907 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cd3195b888bdde70b3568541344b4bc7#C#compaction#361 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:32,908 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/d845e482be5e44f88dcf063c7591030b is 50, key is test_row_0/C:col10/1733845052037/Put/seqid=0 2024-12-10T15:37:32,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742249_1425 (size=12493) 2024-12-10T15:37:32,954 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/d845e482be5e44f88dcf063c7591030b as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/d845e482be5e44f88dcf063c7591030b 2024-12-10T15:37:33,025 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cd3195b888bdde70b3568541344b4bc7/C of cd3195b888bdde70b3568541344b4bc7 into d845e482be5e44f88dcf063c7591030b(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:37:33,025 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:33,025 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7., storeName=cd3195b888bdde70b3568541344b4bc7/C, priority=12, startTime=1733845052749; duration=0sec 2024-12-10T15:37:33,025 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:33,025 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cd3195b888bdde70b3568541344b4bc7:C 2024-12-10T15:37:33,025 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:37:33,032 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:33,035 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-10T15:37:33,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:33,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:33,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:33,036 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:33,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:33,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:33,048 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48510 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:37:33,048 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): cd3195b888bdde70b3568541344b4bc7/B is initiating minor compaction (all files) 2024-12-10T15:37:33,048 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cd3195b888bdde70b3568541344b4bc7/B in TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:33,049 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/52b12045c76c456c86e45dab17f5aec5, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/6646b75cae8e417fb22bc4cca4d5caea, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/5f4fb62d3f534136b4df8b8748aedfd9, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/1b27ef32c1f7405587a9e9c18dc016ec] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp, totalSize=47.4 K 2024-12-10T15:37:33,049 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 52b12045c76c456c86e45dab17f5aec5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733845049095 2024-12-10T15:37:33,050 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 6646b75cae8e417fb22bc4cca4d5caea, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1733845049766 2024-12-10T15:37:33,051 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f4fb62d3f534136b4df8b8748aedfd9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733845050557 2024-12-10T15:37:33,052 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b27ef32c1f7405587a9e9c18dc016ec, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1733845051906 2024-12-10T15:37:33,076 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:33,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845113068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:33,086 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:33,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845113077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:33,100 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cd3195b888bdde70b3568541344b4bc7#B#compaction#362 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:33,100 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/73508a06ed1c47af88f8d2da92543edc is 50, key is test_row_0/B:col10/1733845052037/Put/seqid=0 2024-12-10T15:37:33,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742250_1426 (size=12493) 2024-12-10T15:37:33,168 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/73508a06ed1c47af88f8d2da92543edc as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/73508a06ed1c47af88f8d2da92543edc 2024-12-10T15:37:33,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-10T15:37:33,176 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cd3195b888bdde70b3568541344b4bc7/B of cd3195b888bdde70b3568541344b4bc7 into 73508a06ed1c47af88f8d2da92543edc(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:37:33,177 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:33,177 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7., storeName=cd3195b888bdde70b3568541344b4bc7/B, priority=12, startTime=1733845052749; duration=0sec 2024-12-10T15:37:33,177 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:33,177 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cd3195b888bdde70b3568541344b4bc7:B 2024-12-10T15:37:33,192 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:33,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845113180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:33,196 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:33,200 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-10T15:37:33,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:33,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:33,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:33,201 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
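The RegionTooBusyException warnings scattered through this section ("Over memstore limit=512.0 K") come from the memstore back-pressure check in HRegion.checkResources: writes are rejected while the region's memstore is above its blocking limit, which keeps the memstore bounded until the in-flight flush and compactions catch up. The sketch below is a rough rendering of that check; the 512 KB limit is taken from the log, but the class and field names are illustrative, not HBase source.

import java.io.IOException;

// Rough sketch of the memstore back-pressure check that produces the
// "Over memstore limit=512.0 K" warnings above.
final class MemStoreBackPressureSketch {
    static final long BLOCKING_MEMSTORE_SIZE = 512L * 1024;   // 512.0 K, from the log

    static class RegionTooBusyException extends IOException { // stand-in for the HBase class
        RegionTooBusyException(String msg) { super(msg); }
    }

    /** Reject new writes while the memstore is above the blocking limit. */
    static void checkResources(long memStoreSizeBytes, String regionName)
            throws RegionTooBusyException {
        if (memStoreSizeBytes > BLOCKING_MEMSTORE_SIZE) {
            throw new RegionTooBusyException(
                "Over memstore limit=512.0 K, regionName=" + regionName);
        }
    }
}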
2024-12-10T15:37:33,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:33,202 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:33,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35044 deadline: 1733845113191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:33,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
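The client-side records that follow (RpcRetryingCallerImpl, tries=7 of 16, started roughly 8.2 s ago) show the test's writer threads retrying puts that the server rejected with RegionTooBusyException. A generic capped-backoff retry loop in that spirit is sketched below; the pause schedule and helper names are assumptions, not the actual RpcRetryingCallerImpl schedule.

import java.io.IOException;
import java.util.concurrent.Callable;

// Generic retry-with-backoff loop in the spirit of the RpcRetryingCallerImpl
// records below (tries=7, retries=16). The pause schedule here is an assumption.
final class RetrySketch {
    static <T> T callWithRetries(Callable<T> call, int maxRetries) throws Exception {
        long pauseMs = 100;                                  // assumed base pause
        for (int tries = 0; ; tries++) {
            try {
                return call.call();
            } catch (IOException e) {                        // e.g. RegionTooBusyException
                if (tries >= maxRetries) {
                    throw e;                                 // retries exhausted
                }
                Thread.sleep(pauseMs);
                pauseMs = Math.min(pauseMs * 2, 10_000);     // capped exponential backoff
            }
        }
    }
}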
2024-12-10T15:37:33,204 DEBUG [Thread-1717 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8219 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7., hostname=bf0fec90ff6d,46239,1733844953049, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T15:37:33,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:33,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845113194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:33,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:33,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35082 deadline: 1733845113204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:33,214 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:33,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35012 deadline: 1733845113206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:33,215 DEBUG [Thread-1719 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8235 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7., hostname=bf0fec90ff6d,46239,1733844953049, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T15:37:33,215 DEBUG [Thread-1711 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8230 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, 
regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7., hostname=bf0fec90ff6d,46239,1733844953049, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) 
at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T15:37:33,299 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cd3195b888bdde70b3568541344b4bc7#A#compaction#359 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:33,300 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/30a45ebd830b46c399a5d919b10dbf92 is 175, key is test_row_0/A:col10/1733845052037/Put/seqid=0 2024-12-10T15:37:33,310 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:33,314 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210caf0ea1c3b984739973cded055c88d52_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210caf0ea1c3b984739973cded055c88d52_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:33,315 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/fe812ad9c6c94fe2a11d163576c2fd2c, store: [table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:33,315 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/fe812ad9c6c94fe2a11d163576c2fd2c is 175, key is test_row_0/A:col10/1733845052754/Put/seqid=0 2024-12-10T15:37:33,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742251_1427 (size=31447) 2024-12-10T15:37:33,363 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:33,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742252_1428 (size=39749) 2024-12-10T15:37:33,375 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-10T15:37:33,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:33,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:33,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
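The client stack frames in the retry traces (HTable.put called from AcidGuaranteesTestTool$AtomicityWriter.doAnAction) correspond to writer threads repeatedly putting the same rows across the A/B/C families while flushes and compactions run. A minimal standalone sketch of one such write is shown below; the table name, row, and column qualifier match the log, but the value and loop are illustrative and the configuration setup is the stock HBase client API, not the test tool itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Minimal sketch of what a single writer thread is doing when it hits the
// RegionTooBusyException retries above: putting one row across families A/B/C.
public class AtomicityWriterSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            byte[] row = Bytes.toBytes("test_row_0");        // row key seen in the log
            Put put = new Put(row);
            for (String family : new String[] {"A", "B", "C"}) {
                put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"),
                    Bytes.toBytes("value"));                 // illustrative value
            }
            table.put(put);   // the client retries internally on RegionTooBusyException
        }
    }
}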
2024-12-10T15:37:33,376 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:33,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:33,376 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=172, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/fe812ad9c6c94fe2a11d163576c2fd2c 2024-12-10T15:37:33,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] 
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:33,393 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/ce1538b527b243f48265011d5a9da3af is 50, key is test_row_0/B:col10/1733845052754/Put/seqid=0 2024-12-10T15:37:33,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:33,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845113394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:33,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:33,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845113420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:33,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742253_1429 (size=12151) 2024-12-10T15:37:33,529 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:33,529 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-10T15:37:33,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:33,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:33,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:33,530 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:33,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:33,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:33,683 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:33,687 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-10T15:37:33,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:33,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:33,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:33,688 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:33,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:33,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:33,707 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:33,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845113702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:33,744 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:33,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845113736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:33,795 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/30a45ebd830b46c399a5d919b10dbf92 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/30a45ebd830b46c399a5d919b10dbf92 2024-12-10T15:37:33,823 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cd3195b888bdde70b3568541344b4bc7/A of cd3195b888bdde70b3568541344b4bc7 into 30a45ebd830b46c399a5d919b10dbf92(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 1sec to execute. 
2024-12-10T15:37:33,824 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:33,824 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7., storeName=cd3195b888bdde70b3568541344b4bc7/A, priority=12, startTime=1733845052749; duration=1sec 2024-12-10T15:37:33,824 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:33,824 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cd3195b888bdde70b3568541344b4bc7:A 2024-12-10T15:37:33,840 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/ce1538b527b243f48265011d5a9da3af 2024-12-10T15:37:33,855 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:33,855 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-10T15:37:33,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:33,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:33,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:33,859 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:33,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:33,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:33,860 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/88e45d8bbf9047a78c1faaf05499abbd is 50, key is test_row_0/C:col10/1733845052754/Put/seqid=0 2024-12-10T15:37:33,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742254_1430 (size=12151) 2024-12-10T15:37:34,016 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:34,017 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-10T15:37:34,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:34,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:34,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:34,019 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:34,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:34,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:34,175 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:34,176 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-10T15:37:34,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:34,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:34,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:34,176 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:34,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-10T15:37:34,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:34,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:34,218 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:34,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845114212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:34,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:34,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845114252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:34,323 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/88e45d8bbf9047a78c1faaf05499abbd 2024-12-10T15:37:34,329 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:34,329 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-10T15:37:34,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:34,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:34,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:34,330 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:37:34,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105
java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:37:34,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=105
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:37:34,379 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/fe812ad9c6c94fe2a11d163576c2fd2c as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/fe812ad9c6c94fe2a11d163576c2fd2c
2024-12-10T15:37:34,413 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/fe812ad9c6c94fe2a11d163576c2fd2c, entries=200, sequenceid=172, filesize=38.8 K
2024-12-10T15:37:34,420 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/ce1538b527b243f48265011d5a9da3af as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/ce1538b527b243f48265011d5a9da3af
2024-12-10T15:37:34,429 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/ce1538b527b243f48265011d5a9da3af, entries=150, sequenceid=172, filesize=11.9 K
2024-12-10T15:37:34,430 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/88e45d8bbf9047a78c1faaf05499abbd as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/88e45d8bbf9047a78c1faaf05499abbd
2024-12-10T15:37:34,434 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/88e45d8bbf9047a78c1faaf05499abbd, entries=150, sequenceid=172, filesize=11.9 K
2024-12-10T15:37:34,438 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for cd3195b888bdde70b3568541344b4bc7 in 1682ms, sequenceid=172, compaction requested=false
2024-12-10T15:37:34,438 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cd3195b888bdde70b3568541344b4bc7:
2024-12-10T15:37:34,482 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049
2024-12-10T15:37:34,483 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105
2024-12-10T15:37:34,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.
2024-12-10T15:37:34,487 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2837): Flushing cd3195b888bdde70b3568541344b4bc7 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB
2024-12-10T15:37:34,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=A
2024-12-10T15:37:34,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T15:37:34,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=B
2024-12-10T15:37:34,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T15:37:34,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=C
2024-12-10T15:37:34,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T15:37:34,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210997c49e7e0c54e27b085f6c10ba24fb1_cd3195b888bdde70b3568541344b4bc7 is 50, key is test_row_0/A:col10/1733845053052/Put/seqid=0
2024-12-10T15:37:34,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742255_1431 (size=12304)
2024-12-10T15:37:34,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:34,533 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210997c49e7e0c54e27b085f6c10ba24fb1_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210997c49e7e0c54e27b085f6c10ba24fb1_cd3195b888bdde70b3568541344b4bc7
2024-12-10T15:37:34,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/44ae908830524e5e915dd2c422cc6f60, store: [table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7]
2024-12-10T15:37:34,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/44ae908830524e5e915dd2c422cc6f60 is 175, key is test_row_0/A:col10/1733845053052/Put/seqid=0
2024-12-10T15:37:34,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742256_1432 (size=31105)
2024-12-10T15:37:34,957 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=199, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/44ae908830524e5e915dd2c422cc6f60
2024-12-10T15:37:34,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/d336cf34ce8e46509f1c5f383500fb37 is 50, key is test_row_0/B:col10/1733845053052/Put/seqid=0
2024-12-10T15:37:35,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742257_1433 (size=12151)
2024-12-10T15:37:35,017 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/d336cf34ce8e46509f1c5f383500fb37
2024-12-10T15:37:35,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/4975974af80d4103903a32066c6511db is 50, key is test_row_0/C:col10/1733845053052/Put/seqid=0
2024-12-10T15:37:35,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742258_1434 (size=12151)
2024-12-10T15:37:35,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on cd3195b888bdde70b3568541344b4bc7
2024-12-10T15:37:35,235 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing
2024-12-10T15:37:35,324 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T15:37:35,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845115316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049
2024-12-10T15:37:35,325 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T15:37:35,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845115322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049
2024-12-10T15:37:35,432 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T15:37:35,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845115427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049
2024-12-10T15:37:35,444 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T15:37:35,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845115436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049
2024-12-10T15:37:35,480 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/4975974af80d4103903a32066c6511db
2024-12-10T15:37:35,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/44ae908830524e5e915dd2c422cc6f60 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/44ae908830524e5e915dd2c422cc6f60
2024-12-10T15:37:35,560 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/44ae908830524e5e915dd2c422cc6f60, entries=150, sequenceid=199, filesize=30.4 K
2024-12-10T15:37:35,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/d336cf34ce8e46509f1c5f383500fb37 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/d336cf34ce8e46509f1c5f383500fb37
2024-12-10T15:37:35,612 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/d336cf34ce8e46509f1c5f383500fb37, entries=150, sequenceid=199, filesize=11.9 K
2024-12-10T15:37:35,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/4975974af80d4103903a32066c6511db as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/4975974af80d4103903a32066c6511db
2024-12-10T15:37:35,649 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T15:37:35,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845115636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049
2024-12-10T15:37:35,657 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/4975974af80d4103903a32066c6511db, entries=150, sequenceid=199, filesize=11.9 K
2024-12-10T15:37:35,657 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T15:37:35,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845115646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049
2024-12-10T15:37:35,658 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for cd3195b888bdde70b3568541344b4bc7 in 1171ms, sequenceid=199, compaction requested=true
2024-12-10T15:37:35,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for cd3195b888bdde70b3568541344b4bc7:
2024-12-10T15:37:35,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.
2024-12-10T15:37:35,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105
2024-12-10T15:37:35,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=105
2024-12-10T15:37:35,669 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104
2024-12-10T15:37:35,669 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.5950 sec
2024-12-10T15:37:35,670 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees in 3.6090 sec
2024-12-10T15:37:35,963 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cd3195b888bdde70b3568541344b4bc7 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB
2024-12-10T15:37:35,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=A
2024-12-10T15:37:35,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T15:37:35,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=B
2024-12-10T15:37:35,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T15:37:35,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=C
2024-12-10T15:37:35,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T15:37:35,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on cd3195b888bdde70b3568541344b4bc7
2024-12-10T15:37:35,998 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210fdfae6e8a05044d78859ab2cc773f91b_cd3195b888bdde70b3568541344b4bc7 is 50, key is test_row_0/A:col10/1733845055294/Put/seqid=0
2024-12-10T15:37:36,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742259_1435 (size=14794)
2024-12-10T15:37:36,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T15:37:36,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845116053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049
2024-12-10T15:37:36,060 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T15:37:36,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845116055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049
2024-12-10T15:37:36,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T15:37:36,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845116167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049
2024-12-10T15:37:36,174 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T15:37:36,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845116168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049
2024-12-10T15:37:36,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104
2024-12-10T15:37:36,177 INFO [Thread-1721 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 104 completed
2024-12-10T15:37:36,180 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-10T15:37:36,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees
2024-12-10T15:37:36,182 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-10T15:37:36,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106
2024-12-10T15:37:36,182 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-10T15:37:36,182 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-10T15:37:36,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106
2024-12-10T15:37:36,335 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049
2024-12-10T15:37:36,339 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107
2024-12-10T15:37:36,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.
2024-12-10T15:37:36,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing
2024-12-10T15:37:36,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.
2024-12-10T15:37:36,342 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107
java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:37:36,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107
java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:37:36,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=107
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:37:36,388 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T15:37:36,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845116378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049
2024-12-10T15:37:36,389 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T15:37:36,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845116383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049
2024-12-10T15:37:36,425 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:36,441 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210fdfae6e8a05044d78859ab2cc773f91b_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210fdfae6e8a05044d78859ab2cc773f91b_cd3195b888bdde70b3568541344b4bc7
2024-12-10T15:37:36,442 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/e4ee3fdfe22e47499d9a1c7cc2546281, store: [table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7]
2024-12-10T15:37:36,443 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/e4ee3fdfe22e47499d9a1c7cc2546281 is 175, key is test_row_0/A:col10/1733845055294/Put/seqid=0
2024-12-10T15:37:36,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742260_1436 (size=39749)
2024-12-10T15:37:36,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106
2024-12-10T15:37:36,499 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049
2024-12-10T15:37:36,499 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107
2024-12-10T15:37:36,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.
2024-12-10T15:37:36,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing
2024-12-10T15:37:36,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.
2024-12-10T15:37:36,499 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107
java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:37:36,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107
java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:37:36,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:36,655 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:36,655 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-10T15:37:36,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:36,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
as already flushing 2024-12-10T15:37:36,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:36,656 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:36,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:36,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:36,691 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:36,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845116691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:36,702 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:36,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845116699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:36,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-10T15:37:36,808 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:36,811 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-10T15:37:36,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:36,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:36,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:36,815 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
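The exchange above repeats several times: the master re-dispatches FlushRegionProcedure pid=107, the region server's FlushRegionCallable finds the region already mid-flush (HRegion logs "NOT flushing ... as already flushing"), the callable fails with the IOException, and the master tries again until the in-flight flush completes. As an illustration only, not HRegion's actual bookkeeping, the guard pattern behind the "already flushing" message looks roughly like the sketch below; all names in it are invented.

```java
import java.util.concurrent.atomic.AtomicBoolean;

// Illustration of the "decline if a flush is already running" shape seen in the log.
// HRegion's real state machine is far more involved; this only shows the basic guard.
public class FlushGuardSketch {
    private final AtomicBoolean flushing = new AtomicBoolean(false);

    /** Runs flushWork unless a flush is already in progress; returns false if declined. */
    boolean flushIfIdle(Runnable flushWork) {
        if (!flushing.compareAndSet(false, true)) {
            return false; // caller would log "NOT flushing ... as already flushing"
        }
        try {
            flushWork.run();
            return true;
        } finally {
            flushing.set(false);
        }
    }

    public static void main(String[] args) {
        FlushGuardSketch guard = new FlushGuardSketch();
        boolean ran = guard.flushIfIdle(() -> System.out.println("flushing memstore..."));
        System.out.println(ran ? "flush performed" : "declined: already flushing");
    }
}
```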
2024-12-10T15:37:36,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:36,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:36,894 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=212, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/e4ee3fdfe22e47499d9a1c7cc2546281 2024-12-10T15:37:36,947 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/1b77dfc0f836456d9a4594fd03d81f39 is 50, key is test_row_0/B:col10/1733845055294/Put/seqid=0 2024-12-10T15:37:36,979 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:36,980 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-10T15:37:36,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:36,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:36,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:36,980 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
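The flush of family A runs through mob.DefaultMobStoreFlusher, which means the A column family of this test table is MOB-enabled. Below is a hedged sketch of how such a family is declared with the HBase 2.x client API; the 4 KB threshold is an assumption for illustration, not the value the test actually uses.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: create a table whose 'A' family is MOB-enabled, so its flushes go through
// DefaultMobStoreFlusher as in the log. The threshold value is an assumed example.
public class CreateMobTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            admin.createTable(TableDescriptorBuilder.newBuilder(table)
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                    .setMobEnabled(true)          // route large values into MOB files under /mobdir
                    .setMobThreshold(4 * 1024L)   // assumed: cells of 4 KB or more become MOB cells
                    .build())
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
                .build());
        }
    }
}
```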
2024-12-10T15:37:36,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:36,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742261_1437 (size=12151) 2024-12-10T15:37:36,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:36,983 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/1b77dfc0f836456d9a4594fd03d81f39 2024-12-10T15:37:37,040 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/7feddf49b9494b5e97f38116acc786b0 is 50, key is test_row_0/C:col10/1733845055294/Put/seqid=0 2024-12-10T15:37:37,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742262_1438 (size=12151) 2024-12-10T15:37:37,083 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/7feddf49b9494b5e97f38116acc786b0 2024-12-10T15:37:37,129 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/e4ee3fdfe22e47499d9a1c7cc2546281 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/e4ee3fdfe22e47499d9a1c7cc2546281 2024-12-10T15:37:37,135 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:37,139 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-10T15:37:37,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:37,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:37,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
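The "Committing .../.tmp/A/... as .../A/..." line above is the promotion of a freshly flushed HFile from the region's temporary directory into the live store directory. The sketch below shows that move with the plain Hadoop FileSystem API; the paths are placeholders, and the real HRegionFileSystem performs additional checks around the rename.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Minimal illustration of the ".tmp -> store dir" commit step logged by HRegionFileSystem:
// the flusher writes under <region>/.tmp/<family>/ and then renames into <region>/<family>/.
public class CommitStoreFileSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path tmpFile = new Path("/hbase/data/default/TestAcidGuarantees/<region>/.tmp/A/<hfile>");
        Path committed = new Path("/hbase/data/default/TestAcidGuarantees/<region>/A/<hfile>");
        fs.mkdirs(committed.getParent());
        if (!fs.rename(tmpFile, committed)) {
            throw new IOException("Failed to commit " + tmpFile + " as " + committed);
        }
    }
}
```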
2024-12-10T15:37:37,140 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:37,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:37,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:37,179 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/e4ee3fdfe22e47499d9a1c7cc2546281, entries=200, sequenceid=212, filesize=38.8 K 2024-12-10T15:37:37,190 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/1b77dfc0f836456d9a4594fd03d81f39 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/1b77dfc0f836456d9a4594fd03d81f39 2024-12-10T15:37:37,205 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:37,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845117199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:37,217 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:37,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845117210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:37,227 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/1b77dfc0f836456d9a4594fd03d81f39, entries=150, sequenceid=212, filesize=11.9 K 2024-12-10T15:37:37,242 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/7feddf49b9494b5e97f38116acc786b0 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/7feddf49b9494b5e97f38116acc786b0 2024-12-10T15:37:37,272 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/7feddf49b9494b5e97f38116acc786b0, entries=150, sequenceid=212, filesize=11.9 K 2024-12-10T15:37:37,274 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for cd3195b888bdde70b3568541344b4bc7 in 1310ms, sequenceid=212, compaction requested=true 2024-12-10T15:37:37,274 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:37,274 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:37:37,274 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cd3195b888bdde70b3568541344b4bc7:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:37:37,274 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:37,274 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:37:37,274 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cd3195b888bdde70b3568541344b4bc7:B, priority=-2147483648, current under compaction store size is 2 
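The RegionTooBusyException warnings above come from HRegion.checkResources: once a region's memstore exceeds its blocking limit (512 K here), new mutations are rejected until a flush drains it, and the client retries them up to the call deadline. The blocking limit is roughly the configured flush size times the block multiplier; the values in the sketch below are assumptions chosen only to reproduce a 512 K limit and are presumably not exactly what the test sets.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Assumed settings that would yield the 512 K blocking limit seen in the log (128 K * 4).
public class MemstoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // flush at 128 K
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block writes at 4x = 512 K
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        System.out.println("writes blocked once the region memstore exceeds " + blockingLimit + " bytes");
    }
}
```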
2024-12-10T15:37:37,274 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:37,274 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cd3195b888bdde70b3568541344b4bc7:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:37:37,274 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:37,284 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:37:37,284 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): cd3195b888bdde70b3568541344b4bc7/B is initiating minor compaction (all files) 2024-12-10T15:37:37,284 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cd3195b888bdde70b3568541344b4bc7/B in TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:37,284 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/73508a06ed1c47af88f8d2da92543edc, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/ce1538b527b243f48265011d5a9da3af, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/d336cf34ce8e46509f1c5f383500fb37, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/1b77dfc0f836456d9a4594fd03d81f39] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp, totalSize=47.8 K 2024-12-10T15:37:37,285 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 73508a06ed1c47af88f8d2da92543edc, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1733845051906 2024-12-10T15:37:37,287 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting ce1538b527b243f48265011d5a9da3af, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1733845052090 2024-12-10T15:37:37,288 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting d336cf34ce8e46509f1c5f383500fb37, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1733845052970 2024-12-10T15:37:37,288 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b77dfc0f836456d9a4594fd03d81f39, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1733845055294 2024-12-10T15:37:37,289 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): 
Exploring compaction algorithm has selected 4 files of size 142050 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:37:37,289 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): cd3195b888bdde70b3568541344b4bc7/A is initiating minor compaction (all files) 2024-12-10T15:37:37,289 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cd3195b888bdde70b3568541344b4bc7/A in TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:37,289 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/30a45ebd830b46c399a5d919b10dbf92, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/fe812ad9c6c94fe2a11d163576c2fd2c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/44ae908830524e5e915dd2c422cc6f60, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/e4ee3fdfe22e47499d9a1c7cc2546281] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp, totalSize=138.7 K 2024-12-10T15:37:37,289 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:37,289 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
files: [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/30a45ebd830b46c399a5d919b10dbf92, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/fe812ad9c6c94fe2a11d163576c2fd2c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/44ae908830524e5e915dd2c422cc6f60, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/e4ee3fdfe22e47499d9a1c7cc2546281] 2024-12-10T15:37:37,290 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 30a45ebd830b46c399a5d919b10dbf92, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1733845051906 2024-12-10T15:37:37,294 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting fe812ad9c6c94fe2a11d163576c2fd2c, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1733845052062 2024-12-10T15:37:37,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-10T15:37:37,298 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 44ae908830524e5e915dd2c422cc6f60, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1733845052970 2024-12-10T15:37:37,299 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:37,299 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-10T15:37:37,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
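The two "Exploring compaction algorithm has selected 4 files of size ..." lines are easy to sanity-check: 48946 bytes is the 47.8 K logged for the B selection and 142050 bytes is the 138.7 K logged for the A selection. The sketch below does that arithmetic and also shows, in rough form, the kind of "in ratio" test the exploring policy applies to a candidate set (no file larger than the ratio, commonly 1.2, times the sum of the others); it is a simplification for reading the log line, not the real ExploringCompactionPolicy.

```java
import java.util.List;

public class CompactionSelectionSketch {
    // Rough "in ratio" check: every file no larger than ratio * (sum of the other files).
    static boolean filesInRatio(List<Double> sizesKb, double ratio) {
        double total = sizesKb.stream().mapToDouble(Double::doubleValue).sum();
        return sizesKb.stream().allMatch(s -> s <= ratio * (total - s));
    }

    public static void main(String[] args) {
        System.out.printf("B selection: %.1f K%n", 48_946 / 1024.0);   // ~47.8 K, as logged
        System.out.printf("A selection: %.1f K%n", 142_050 / 1024.0);  // ~138.7 K, as logged
        // B-store candidate sizes from the Compactor lines: 12.2 K + 3 x 11.9 K
        System.out.println("B candidates in ratio 1.2: "
            + filesInRatio(List.of(12.2, 11.9, 11.9, 11.9), 1.2));
    }
}
```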
2024-12-10T15:37:37,299 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing cd3195b888bdde70b3568541344b4bc7 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-10T15:37:37,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=A 2024-12-10T15:37:37,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:37,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=B 2024-12-10T15:37:37,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:37,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=C 2024-12-10T15:37:37,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:37,300 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting e4ee3fdfe22e47499d9a1c7cc2546281, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1733845055294 2024-12-10T15:37:37,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210f90b9dcd9d024e5f920819b8bf224051_cd3195b888bdde70b3568541344b4bc7 is 50, key is test_row_0/A:col10/1733845056043/Put/seqid=0 2024-12-10T15:37:37,349 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cd3195b888bdde70b3568541344b4bc7#B#compaction#372 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:37,350 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/0cebd1d4455b4553b5c553fce398dbc7 is 50, key is test_row_0/B:col10/1733845055294/Put/seqid=0 2024-12-10T15:37:37,352 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:37,354 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210d4588882ca2c40a9b28f6fa17746be8b_cd3195b888bdde70b3568541344b4bc7 store=[table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:37,356 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210d4588882ca2c40a9b28f6fa17746be8b_cd3195b888bdde70b3568541344b4bc7, store=[table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:37,356 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210d4588882ca2c40a9b28f6fa17746be8b_cd3195b888bdde70b3568541344b4bc7 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:37,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742264_1440 (size=12629) 2024-12-10T15:37:37,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742263_1439 (size=12304) 2024-12-10T15:37:37,368 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/0cebd1d4455b4553b5c553fce398dbc7 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/0cebd1d4455b4553b5c553fce398dbc7 2024-12-10T15:37:37,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:37,376 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cd3195b888bdde70b3568541344b4bc7/B of cd3195b888bdde70b3568541344b4bc7 into 0cebd1d4455b4553b5c553fce398dbc7(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
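The DefaultMobStoreCompactor lines above show its commit-or-abort step: a temporary MOB file is created up front, and because this compaction wrote zero MOB cells ("size=0 mobCells=0") the writer is aborted and the temp file discarded rather than committed. Below is a toy illustration of that decision with placeholder paths; the real compactor works on HFile writers, not raw streams.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Toy "commit or abort" decision: discard the temp MOB output if nothing was written to it.
public class CommitOrAbortSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path tmpMobFile = new Path("/hbase/mobdir/.tmp/<mob-file-name>");
        long mobCells = 0;
        try (FSDataOutputStream out = fs.create(tmpMobFile, true)) {
            // a real compaction loop would write oversized cells here and increment mobCells
        }
        if (mobCells == 0) {
            fs.delete(tmpMobFile, false);   // "Aborting writer ... because there are no MOB cells"
        } else {
            fs.rename(tmpMobFile, new Path("/hbase/mobdir/<mob-file-name>")); // commit
        }
    }
}
```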
2024-12-10T15:37:37,376 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:37,376 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7., storeName=cd3195b888bdde70b3568541344b4bc7/B, priority=12, startTime=1733845057274; duration=0sec 2024-12-10T15:37:37,376 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:37,376 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cd3195b888bdde70b3568541344b4bc7:B 2024-12-10T15:37:37,376 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:37:37,377 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210f90b9dcd9d024e5f920819b8bf224051_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210f90b9dcd9d024e5f920819b8bf224051_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:37,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742265_1441 (size=4469) 2024-12-10T15:37:37,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/5b8606eb6fec4369a2903b79a6b1fe03, store: [table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:37,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/5b8606eb6fec4369a2903b79a6b1fe03 is 175, key is test_row_0/A:col10/1733845056043/Put/seqid=0 2024-12-10T15:37:37,388 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cd3195b888bdde70b3568541344b4bc7#A#compaction#373 average throughput is 0.68 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:37,388 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/3b7d9001df564b1fb7ca657dcf29db46 is 175, key is test_row_0/A:col10/1733845055294/Put/seqid=0 2024-12-10T15:37:37,392 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:37:37,392 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): cd3195b888bdde70b3568541344b4bc7/C is initiating minor compaction (all files) 2024-12-10T15:37:37,392 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cd3195b888bdde70b3568541344b4bc7/C in TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:37,393 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/d845e482be5e44f88dcf063c7591030b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/88e45d8bbf9047a78c1faaf05499abbd, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/4975974af80d4103903a32066c6511db, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/7feddf49b9494b5e97f38116acc786b0] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp, totalSize=47.8 K 2024-12-10T15:37:37,394 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting d845e482be5e44f88dcf063c7591030b, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1733845051906 2024-12-10T15:37:37,394 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 88e45d8bbf9047a78c1faaf05499abbd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1733845052090 2024-12-10T15:37:37,399 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 4975974af80d4103903a32066c6511db, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1733845052970 2024-12-10T15:37:37,400 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 7feddf49b9494b5e97f38116acc786b0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1733845055294 2024-12-10T15:37:37,430 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cd3195b888bdde70b3568541344b4bc7#C#compaction#374 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:37,430 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/dcbc186b94ab45bd87743aeacdf74b66 is 50, key is test_row_0/C:col10/1733845055294/Put/seqid=0 2024-12-10T15:37:37,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742266_1442 (size=31583) 2024-12-10T15:37:37,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742267_1443 (size=31105) 2024-12-10T15:37:37,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742268_1444 (size=12629) 2024-12-10T15:37:37,461 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/dcbc186b94ab45bd87743aeacdf74b66 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/dcbc186b94ab45bd87743aeacdf74b66 2024-12-10T15:37:37,488 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cd3195b888bdde70b3568541344b4bc7/C of cd3195b888bdde70b3568541344b4bc7 into dcbc186b94ab45bd87743aeacdf74b66(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:37:37,489 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:37,489 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7., storeName=cd3195b888bdde70b3568541344b4bc7/C, priority=12, startTime=1733845057274; duration=0sec 2024-12-10T15:37:37,489 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:37,489 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cd3195b888bdde70b3568541344b4bc7:C 2024-12-10T15:37:37,842 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=235, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/5b8606eb6fec4369a2903b79a6b1fe03 2024-12-10T15:37:37,843 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/3b7d9001df564b1fb7ca657dcf29db46 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/3b7d9001df564b1fb7ca657dcf29db46 2024-12-10T15:37:37,852 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cd3195b888bdde70b3568541344b4bc7/A of cd3195b888bdde70b3568541344b4bc7 into 3b7d9001df564b1fb7ca657dcf29db46(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
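Each completed compaction reports its average throughput and how often the PressureAwareThroughputController made it sleep; in this run the limit is 50.00 MB/second and no sleeping was needed. The sketch below is a toy rate limiter conveying the same idea (report bytes written, sleep if running ahead of the cap), not the actual controller.

```java
// Toy throughput throttle: keeps cumulative write rate under a byte-per-second cap by sleeping.
public class ThroughputThrottleSketch {
    private final double maxBytesPerSec;
    private long bytesWritten;
    private final long startNanos = System.nanoTime();

    ThroughputThrottleSketch(double maxBytesPerSec) { this.maxBytesPerSec = maxBytesPerSec; }

    /** Call after writing `bytes`; sleeps if we are ahead of the allowed rate. */
    void control(long bytes) throws InterruptedException {
        bytesWritten += bytes;
        double minSeconds = bytesWritten / maxBytesPerSec;        // earliest time we may reach this total
        double elapsed = (System.nanoTime() - startNanos) / 1e9;
        if (elapsed < minSeconds) {
            Thread.sleep((long) ((minSeconds - elapsed) * 1000)); // the "slept" the log would report
        }
    }

    public static void main(String[] args) throws InterruptedException {
        ThroughputThrottleSketch throttle = new ThroughputThrottleSketch(50 * 1024 * 1024);
        for (int i = 0; i < 10; i++) {
            throttle.control(8 * 1024 * 1024); // pretend we wrote an 8 MB chunk
        }
        System.out.println("done");
    }
}
```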
2024-12-10T15:37:37,852 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:37,852 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7., storeName=cd3195b888bdde70b3568541344b4bc7/A, priority=12, startTime=1733845057274; duration=0sec 2024-12-10T15:37:37,852 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:37,852 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cd3195b888bdde70b3568541344b4bc7:A 2024-12-10T15:37:37,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/6989225ab14945e8b711953bc996a138 is 50, key is test_row_0/B:col10/1733845056043/Put/seqid=0 2024-12-10T15:37:37,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742269_1445 (size=12151) 2024-12-10T15:37:37,916 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/6989225ab14945e8b711953bc996a138 2024-12-10T15:37:37,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/e95e5a62bfc246168ef2922e772d73f3 is 50, key is test_row_0/C:col10/1733845056043/Put/seqid=0 2024-12-10T15:37:37,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742270_1446 (size=12151) 2024-12-10T15:37:37,983 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/e95e5a62bfc246168ef2922e772d73f3 2024-12-10T15:37:38,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/5b8606eb6fec4369a2903b79a6b1fe03 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/5b8606eb6fec4369a2903b79a6b1fe03 2024-12-10T15:37:38,040 INFO 
[RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/5b8606eb6fec4369a2903b79a6b1fe03, entries=150, sequenceid=235, filesize=30.4 K 2024-12-10T15:37:38,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/6989225ab14945e8b711953bc996a138 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/6989225ab14945e8b711953bc996a138 2024-12-10T15:37:38,045 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/6989225ab14945e8b711953bc996a138, entries=150, sequenceid=235, filesize=11.9 K 2024-12-10T15:37:38,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/e95e5a62bfc246168ef2922e772d73f3 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/e95e5a62bfc246168ef2922e772d73f3 2024-12-10T15:37:38,050 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/e95e5a62bfc246168ef2922e772d73f3, entries=150, sequenceid=235, filesize=11.9 K 2024-12-10T15:37:38,052 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=0 B/0 for cd3195b888bdde70b3568541344b4bc7 in 752ms, sequenceid=235, compaction requested=false 2024-12-10T15:37:38,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:38,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
2024-12-10T15:37:38,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107
2024-12-10T15:37:38,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=107
2024-12-10T15:37:38,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,059 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106
2024-12-10T15:37:38,059 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8720 sec
2024-12-10T15:37:38,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,061 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 1.8800 sec
2024-12-10T15:37:38,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106
2024-12-10T15:37:38,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,300 INFO [Thread-1721 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed
2024-12-10T15:37:38,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,307 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-10T15:37:38,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees
2024-12-10T15:37:38,309 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-10T15:37:38,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,309 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-10T15:37:38,309 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-10T15:37:38,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108
2024-12-10T15:37:38,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:37:38,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... 15:37:38,353 - 15:37:38,410: the same DEBUG entry repeats from RpcServer handlers on port 46239: storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker ...]
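The repeated DEBUG entry above shows StoreFileTrackerFactory instantiating org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker for each store it touches while serving these RPCs. As a rough, hedged illustration only (this is not taken from the test's configuration, and the property name "hbase.store.file-tracker.impl" and the value "DEFAULT" are assumptions), selecting that tracker implementation from client code might look like:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileTrackerConfigSketch {
      public static void main(String[] args) {
        // Load hbase-site.xml / defaults into a client configuration.
        Configuration conf = HBaseConfiguration.create();
        // Assumed property name and value; DefaultStoreFileTracker is the class
        // the factory reports instantiating in the log entries above.
        conf.set("hbase.store.file-tracker.impl", "DEFAULT");
      }
    }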
2024-12-10T15:37:38,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108
[... 15:37:38,411 - 15:37:38,455: the same DEBUG entry repeats from RpcServer handlers on port 46239: storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker ...]
[... 15:37:38,456 - 15:37:38,462: the same StoreFileTrackerFactory(122) DEBUG entry repeats on port 46239, interleaved with the entries below ...]
2024-12-10T15:37:38,461 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049
2024-12-10T15:37:38,461 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109
2024-12-10T15:37:38,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.
2024-12-10T15:37:38,462 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing cd3195b888bdde70b3568541344b4bc7 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB
2024-12-10T15:37:38,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=A
2024-12-10T15:37:38,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T15:37:38,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=B
2024-12-10T15:37:38,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T15:37:38,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=C
2024-12-10T15:37:38,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T15:37:38,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on cd3195b888bdde70b3568541344b4bc7
2024-12-10T15:37:38,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing
[... 15:37:38,464 - 15:37:38,492: the same StoreFileTrackerFactory(122) DEBUG entry repeats on port 46239 ...]
2024-12-10T15:37:38,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210b81571df78264a70a13a448c42ebefc1_cd3195b888bdde70b3568541344b4bc7 is 50, key is test_row_0/A:col10/1733845058460/Put/seqid=0
[... 15:37:38,496 - 15:37:38,500: the same StoreFileTrackerFactory(122) DEBUG entry repeats on port 46239 ...]
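The pid=109 entries above trace a flush of region cd3195b888bdde70b3568541344b4bc7 of table TestAcidGuarantees: the region server executes FlushRegionCallable, HRegion flushes all three column families, and MemStoreFlusher skips a second request because the region is already flushing. For orientation only, a minimal client-side sketch of requesting such a flush (assuming a reachable cluster; only the table name is taken from the log) could look like:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask for a flush of every column family of the table; on the region
          // server this shows up as flush activity like the pid=109 entries above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }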
[... 15:37:38,500 - 15:37:38,518: the same DEBUG entry repeats from RpcServer handlers on port 46239: storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker ...]
2024-12-10T15:37:38,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
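The RegionTooBusyException records that follow are emitted when the region's memstore exceeds its blocking limit, reported here as 512.0 K. That limit is the product of the configured flush size and the block multiplier; the sketch below only illustrates the arithmetic with the standard HBase keys (the 128 KB flush size is an assumption picked so the product matches the logged limit, not a value read from this test):

// Hedged sketch: how a ~512 KB blocking memstore limit could arise from configuration.
// Only the property names are standard HBase keys; the small values are assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);   // assumed 128 KB
    conf.setLong("hbase.hregion.memstore.block.multiplier", 4L);      // default is 4

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128 * 1024 * 1024L);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
    long blockingLimit = flushSize * multiplier; // 512 KB with the values above

    // Writes to a region whose memstore is over this limit are rejected with
    // RegionTooBusyException until a flush brings the memstore back down.
    System.out.println("Blocking memstore limit: " + blockingLimit + " bytes");
  }
}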
2024-12-10T15:37:38,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742271_1447 (size=19774) 2024-12-10T15:37:38,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-10T15:37:38,649 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:38,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845118641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:38,649 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:38,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845118643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:38,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:38,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845118750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:38,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:38,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845118752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:38,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-10T15:37:38,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:38,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:38,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845118958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:38,968 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:38,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845118960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:38,980 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210b81571df78264a70a13a448c42ebefc1_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210b81571df78264a70a13a448c42ebefc1_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:38,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/0c509aef24464142ae57711a67904af8, store: [table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:38,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/0c509aef24464142ae57711a67904af8 is 175, key is test_row_0/A:col10/1733845058460/Put/seqid=0 2024-12-10T15:37:39,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742272_1448 (size=57033) 2024-12-10T15:37:39,281 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:39,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845119270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:39,281 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:39,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845119273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:39,404 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=248, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/0c509aef24464142ae57711a67904af8 2024-12-10T15:37:39,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-10T15:37:39,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/611e26daddc347629c84107230d22776 is 50, key is test_row_0/B:col10/1733845058460/Put/seqid=0 2024-12-10T15:37:39,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742273_1449 (size=12151) 2024-12-10T15:37:39,511 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/611e26daddc347629c84107230d22776 2024-12-10T15:37:39,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/efd201e10f3a46d8b27e3e6be4573496 is 50, key is test_row_0/C:col10/1733845058460/Put/seqid=0 2024-12-10T15:37:39,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742274_1450 (size=12151) 2024-12-10T15:37:39,792 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:39,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845119788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:39,793 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:39,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845119792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:40,035 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/efd201e10f3a46d8b27e3e6be4573496 2024-12-10T15:37:40,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/0c509aef24464142ae57711a67904af8 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/0c509aef24464142ae57711a67904af8 2024-12-10T15:37:40,087 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/0c509aef24464142ae57711a67904af8, entries=300, sequenceid=248, filesize=55.7 K 2024-12-10T15:37:40,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/611e26daddc347629c84107230d22776 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/611e26daddc347629c84107230d22776 2024-12-10T15:37:40,091 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/611e26daddc347629c84107230d22776, entries=150, sequenceid=248, filesize=11.9 K 2024-12-10T15:37:40,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/efd201e10f3a46d8b27e3e6be4573496 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/efd201e10f3a46d8b27e3e6be4573496 2024-12-10T15:37:40,097 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/efd201e10f3a46d8b27e3e6be4573496, entries=150, sequenceid=248, filesize=11.9 K 2024-12-10T15:37:40,098 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for cd3195b888bdde70b3568541344b4bc7 in 1636ms, sequenceid=248, compaction requested=true 2024-12-10T15:37:40,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:40,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:40,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-12-10T15:37:40,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-12-10T15:37:40,100 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-12-10T15:37:40,101 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7900 sec 2024-12-10T15:37:40,102 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 1.7940 sec 2024-12-10T15:37:40,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-10T15:37:40,417 INFO [Thread-1721 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-12-10T15:37:40,426 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:37:40,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-12-10T15:37:40,427 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 
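The RegionTooBusyException responses above reach the client as retriable failures, and the HBase client normally retries them on its own before giving up. Purely as an illustrative sketch, not this test's code, explicit handling of a busy region around a single Put could look like the following (row, family, and qualifier are reused from the logged keys; the value, retry count, and backoff are assumptions, and in practice the exception may also surface wrapped in a retries-exhausted exception):

// Minimal sketch of backing off when a region reports it is over its memstore limit.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryOnBusyRegion {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (RegionTooBusyException busy) {
          // Memstore over its blocking limit; back off and let the flush catch up.
          Thread.sleep(100L * attempt);
        }
      }
    }
  }
}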
2024-12-10T15:37:40,428 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:37:40,428 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:37:40,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-10T15:37:40,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-10T15:37:40,581 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:40,582 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-10T15:37:40,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:40,582 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing cd3195b888bdde70b3568541344b4bc7 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-10T15:37:40,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=A 2024-12-10T15:37:40,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:40,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=B 2024-12-10T15:37:40,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:40,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=C 2024-12-10T15:37:40,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:40,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210c799c5dfddef453689a0c2dcb939339c_cd3195b888bdde70b3568541344b4bc7 is 50, key is test_row_0/A:col10/1733845058638/Put/seqid=0 2024-12-10T15:37:40,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46053 is added to blk_1073742275_1451 (size=12454) 2024-12-10T15:37:40,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:40,685 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210c799c5dfddef453689a0c2dcb939339c_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210c799c5dfddef453689a0c2dcb939339c_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:40,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/6d39fe95d2ac472e898025293979f679, store: [table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:40,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/6d39fe95d2ac472e898025293979f679 is 175, key is test_row_0/A:col10/1733845058638/Put/seqid=0 2024-12-10T15:37:40,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-10T15:37:40,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742276_1452 (size=31255) 2024-12-10T15:37:40,767 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=274, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/6d39fe95d2ac472e898025293979f679 2024-12-10T15:37:40,809 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
as already flushing 2024-12-10T15:37:40,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:40,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/30cba97737ad4f32a3cdcf902cf58c87 is 50, key is test_row_0/B:col10/1733845058638/Put/seqid=0 2024-12-10T15:37:40,843 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:40,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845120834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:40,846 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:40,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845120838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:40,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742277_1453 (size=12301) 2024-12-10T15:37:40,879 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/30cba97737ad4f32a3cdcf902cf58c87 2024-12-10T15:37:40,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/cb300d199d9b49ab8b95c54213f9f69c is 50, key is test_row_0/C:col10/1733845058638/Put/seqid=0 2024-12-10T15:37:40,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:40,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845120944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:40,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742278_1454 (size=12301) 2024-12-10T15:37:40,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:40,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845120949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:40,968 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/cb300d199d9b49ab8b95c54213f9f69c 2024-12-10T15:37:40,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/6d39fe95d2ac472e898025293979f679 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/6d39fe95d2ac472e898025293979f679 2024-12-10T15:37:41,028 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/6d39fe95d2ac472e898025293979f679, entries=150, sequenceid=274, filesize=30.5 K 2024-12-10T15:37:41,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-10T15:37:41,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/30cba97737ad4f32a3cdcf902cf58c87 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/30cba97737ad4f32a3cdcf902cf58c87 2024-12-10T15:37:41,040 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/30cba97737ad4f32a3cdcf902cf58c87, entries=150, sequenceid=274, filesize=12.0 K 2024-12-10T15:37:41,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/cb300d199d9b49ab8b95c54213f9f69c as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/cb300d199d9b49ab8b95c54213f9f69c 2024-12-10T15:37:41,045 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/cb300d199d9b49ab8b95c54213f9f69c, entries=150, sequenceid=274, filesize=12.0 K 2024-12-10T15:37:41,051 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for cd3195b888bdde70b3568541344b4bc7 in 469ms, sequenceid=274, compaction requested=true 2024-12-10T15:37:41,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:41,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:41,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-12-10T15:37:41,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-12-10T15:37:41,061 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-12-10T15:37:41,061 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 632 msec 2024-12-10T15:37:41,063 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 636 msec 2024-12-10T15:37:41,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:41,203 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cd3195b888bdde70b3568541344b4bc7 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-10T15:37:41,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=A 2024-12-10T15:37:41,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:41,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=B 2024-12-10T15:37:41,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:41,204 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=C 2024-12-10T15:37:41,204 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:41,237 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121068a1b3791bee4c0b94ac5efe81010377_cd3195b888bdde70b3568541344b4bc7 is 50, key is test_row_0/A:col10/1733845061178/Put/seqid=0 2024-12-10T15:37:41,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742279_1455 (size=12454) 2024-12-10T15:37:41,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:41,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845121368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:41,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:41,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845121374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:41,488 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:41,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845121487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:41,489 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:41,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845121487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:41,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-10T15:37:41,537 INFO [Thread-1721 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-12-10T15:37:41,538 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:37:41,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-12-10T15:37:41,540 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:37:41,544 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:37:41,544 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:37:41,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-10T15:37:41,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-10T15:37:41,691 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:41,695 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin 
connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:41,696 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121068a1b3791bee4c0b94ac5efe81010377_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121068a1b3791bee4c0b94ac5efe81010377_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:41,697 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/a46656a2003d4e46acd17adde7c174e5, store: [table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:41,698 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/a46656a2003d4e46acd17adde7c174e5 is 175, key is test_row_0/A:col10/1733845061178/Put/seqid=0 2024-12-10T15:37:41,699 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-10T15:37:41,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:41,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:41,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:41,700 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:41,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:41,702 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:41,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845121694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:41,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:41,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:41,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845121695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:41,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742280_1456 (size=31255) 2024-12-10T15:37:41,728 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=286, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/a46656a2003d4e46acd17adde7c174e5 2024-12-10T15:37:41,775 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/983060ace085499183638bd9e0f28ef3 is 50, key is test_row_0/B:col10/1733845061178/Put/seqid=0 2024-12-10T15:37:41,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742281_1457 (size=12301) 2024-12-10T15:37:41,812 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/983060ace085499183638bd9e0f28ef3 2024-12-10T15:37:41,832 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/1228bddb665b422ab62d5abf306caa60 is 50, key is test_row_0/C:col10/1733845061178/Put/seqid=0 2024-12-10T15:37:41,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-10T15:37:41,857 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:41,857 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-10T15:37:41,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:41,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:41,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:41,857 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:41,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:41,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:41,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742282_1458 (size=12301) 2024-12-10T15:37:41,882 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/1228bddb665b422ab62d5abf306caa60 2024-12-10T15:37:41,927 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/a46656a2003d4e46acd17adde7c174e5 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/a46656a2003d4e46acd17adde7c174e5 2024-12-10T15:37:41,953 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/a46656a2003d4e46acd17adde7c174e5, entries=150, sequenceid=286, filesize=30.5 K 2024-12-10T15:37:41,962 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/983060ace085499183638bd9e0f28ef3 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/983060ace085499183638bd9e0f28ef3 2024-12-10T15:37:41,997 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/983060ace085499183638bd9e0f28ef3, entries=150, sequenceid=286, filesize=12.0 K 2024-12-10T15:37:42,000 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/1228bddb665b422ab62d5abf306caa60 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/1228bddb665b422ab62d5abf306caa60 2024-12-10T15:37:42,011 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:42,012 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-10T15:37:42,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:42,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
as already flushing 2024-12-10T15:37:42,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:42,012 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:42,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:42,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:42,014 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/1228bddb665b422ab62d5abf306caa60, entries=150, sequenceid=286, filesize=12.0 K 2024-12-10T15:37:42,019 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for cd3195b888bdde70b3568541344b4bc7 in 816ms, sequenceid=286, compaction requested=true 2024-12-10T15:37:42,020 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:42,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cd3195b888bdde70b3568541344b4bc7:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:37:42,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:42,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cd3195b888bdde70b3568541344b4bc7:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:37:42,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-10T15:37:42,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cd3195b888bdde70b3568541344b4bc7:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:37:42,020 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-10T15:37:42,020 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-10T15:37:42,023 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-10T15:37:42,026 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cd3195b888bdde70b3568541344b4bc7 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-10T15:37:42,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=A 2024-12-10T15:37:42,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:42,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=B 2024-12-10T15:37:42,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:42,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=C 2024-12-10T15:37:42,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:42,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:42,030 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 182231 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-10T15:37:42,030 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): cd3195b888bdde70b3568541344b4bc7/A is initiating minor compaction (all files) 2024-12-10T15:37:42,030 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cd3195b888bdde70b3568541344b4bc7/A in TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
2024-12-10T15:37:42,030 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/3b7d9001df564b1fb7ca657dcf29db46, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/5b8606eb6fec4369a2903b79a6b1fe03, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/0c509aef24464142ae57711a67904af8, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/6d39fe95d2ac472e898025293979f679, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/a46656a2003d4e46acd17adde7c174e5] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp, totalSize=178.0 K 2024-12-10T15:37:42,030 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=11 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:42,030 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. files: [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/3b7d9001df564b1fb7ca657dcf29db46, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/5b8606eb6fec4369a2903b79a6b1fe03, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/0c509aef24464142ae57711a67904af8, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/6d39fe95d2ac472e898025293979f679, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/a46656a2003d4e46acd17adde7c174e5] 2024-12-10T15:37:42,033 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 61533 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-10T15:37:42,033 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): cd3195b888bdde70b3568541344b4bc7/C is initiating minor compaction (all files) 2024-12-10T15:37:42,033 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cd3195b888bdde70b3568541344b4bc7/C in TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
2024-12-10T15:37:42,033 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/dcbc186b94ab45bd87743aeacdf74b66, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/e95e5a62bfc246168ef2922e772d73f3, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/efd201e10f3a46d8b27e3e6be4573496, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/cb300d199d9b49ab8b95c54213f9f69c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/1228bddb665b422ab62d5abf306caa60] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp, totalSize=60.1 K 2024-12-10T15:37:42,034 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3b7d9001df564b1fb7ca657dcf29db46, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1733845055294 2024-12-10T15:37:42,037 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b8606eb6fec4369a2903b79a6b1fe03, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1733845056042 2024-12-10T15:37:42,037 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting dcbc186b94ab45bd87743aeacdf74b66, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1733845055294 2024-12-10T15:37:42,040 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting e95e5a62bfc246168ef2922e772d73f3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1733845056042 2024-12-10T15:37:42,040 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0c509aef24464142ae57711a67904af8, keycount=300, bloomtype=ROW, size=55.7 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733845058283 2024-12-10T15:37:42,044 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6d39fe95d2ac472e898025293979f679, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1733845058637 2024-12-10T15:37:42,046 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting efd201e10f3a46d8b27e3e6be4573496, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733845058445 2024-12-10T15:37:42,047 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting a46656a2003d4e46acd17adde7c174e5, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1733845060831 2024-12-10T15:37:42,050 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting cb300d199d9b49ab8b95c54213f9f69c, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1733845058637 2024-12-10T15:37:42,052 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 1228bddb665b422ab62d5abf306caa60, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1733845060831 2024-12-10T15:37:42,053 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210e84b0f30248c4e26984b36933ef9d15d_cd3195b888bdde70b3568541344b4bc7 is 50, key is test_row_0/A:col10/1733845061368/Put/seqid=0 2024-12-10T15:37:42,065 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:42,077 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cd3195b888bdde70b3568541344b4bc7#C#compaction#388 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:42,078 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/0e8ebdb5590e4883bdb217ea582d8254 is 50, key is test_row_0/C:col10/1733845061178/Put/seqid=0 2024-12-10T15:37:42,098 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:42,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845122085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:42,099 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:42,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845122089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:42,113 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210430b9feabdc948f1a6267bd797446b6b_cd3195b888bdde70b3568541344b4bc7 store=[table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:42,116 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210430b9feabdc948f1a6267bd797446b6b_cd3195b888bdde70b3568541344b4bc7, store=[table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:42,116 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210430b9feabdc948f1a6267bd797446b6b_cd3195b888bdde70b3568541344b4bc7 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:42,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742283_1459 (size=14994) 2024-12-10T15:37:42,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742284_1460 (size=12949) 2024-12-10T15:37:42,123 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:42,127 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210e84b0f30248c4e26984b36933ef9d15d_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210e84b0f30248c4e26984b36933ef9d15d_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:42,128 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/db3744b37db442c5888123af2c79d182, store: 
[table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:42,129 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/db3744b37db442c5888123af2c79d182 is 175, key is test_row_0/A:col10/1733845061368/Put/seqid=0 2024-12-10T15:37:42,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-10T15:37:42,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742286_1462 (size=39949) 2024-12-10T15:37:42,155 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=311, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/db3744b37db442c5888123af2c79d182 2024-12-10T15:37:42,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742285_1461 (size=4469) 2024-12-10T15:37:42,171 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:42,173 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/e2979f5bb0734fbb93daaad4119de694 is 50, key is test_row_0/B:col10/1733845061368/Put/seqid=0 2024-12-10T15:37:42,178 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-10T15:37:42,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:42,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:42,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:42,179 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:42,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:42,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:42,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742287_1463 (size=12301) 2024-12-10T15:37:42,224 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:42,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845122203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:42,226 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:42,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845122211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:42,337 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:42,339 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-10T15:37:42,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
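Note on the repeated "Over memstore limit=512.0 K" warnings above: HRegion.checkResources() rejects writes with RegionTooBusyException once a region's memstore passes its blocking limit, which is the configured flush size multiplied by the block multiplier. The 512 K figure suggests this test runs with a deliberately tiny flush size. The following is only a minimal sketch of how the two standard settings combine; the property names are real HBase keys, the numbers are the stock defaults rather than this test's values.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Stock defaults: 128 MB flush size and a 4x blocking multiplier.
        // TestAcidGuarantees presumably lowers the flush size so the 512 K
        // blocking limit seen in the log is reached almost immediately.
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
        long blockingLimit = flushSize * multiplier;
        System.out.println("Writes are rejected with RegionTooBusyException once a region's"
            + " memstore exceeds " + blockingLimit + " bytes");
      }
    }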
2024-12-10T15:37:42,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:42,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:42,343 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:42,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:42,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:42,433 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:42,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845122427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:42,434 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:42,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845122427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:42,496 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:42,499 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-10T15:37:42,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
2024-12-10T15:37:42,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:42,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:42,503 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:42,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
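Note on the recurring pid=113 failures: the master keeps re-dispatching its flush procedure, the region server's FlushRegionCallable refuses to act while a flush is already in progress, and the resulting IOException is reported back through reportProcedureDone until the region becomes flushable. A hedged sketch of the kind of client call that produces such a procedure is below; whether this test drives it through Admin.flush or an internal test helper is an assumption, the API itself is the standard client one.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequestExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table. The master runs a
          // flush-regions procedure that sends FlushRegionCallable to each region
          // server; a server that is already flushing rejects the call and the
          // procedure is retried, which is the pid=113 pattern in the log.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }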
2024-12-10T15:37:42,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:42,554 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/0e8ebdb5590e4883bdb217ea582d8254 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/0e8ebdb5590e4883bdb217ea582d8254 2024-12-10T15:37:42,577 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cd3195b888bdde70b3568541344b4bc7#A#compaction#387 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:42,577 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in cd3195b888bdde70b3568541344b4bc7/C of cd3195b888bdde70b3568541344b4bc7 into 0e8ebdb5590e4883bdb217ea582d8254(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:37:42,577 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:42,577 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7., storeName=cd3195b888bdde70b3568541344b4bc7/C, priority=11, startTime=1733845062020; duration=0sec 2024-12-10T15:37:42,578 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:42,578 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cd3195b888bdde70b3568541344b4bc7:C 2024-12-10T15:37:42,578 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-10T15:37:42,578 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/5fabf8c6d0054833b35200b07498922c is 175, key is test_row_0/A:col10/1733845061178/Put/seqid=0 2024-12-10T15:37:42,589 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 61533 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-10T15:37:42,589 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): cd3195b888bdde70b3568541344b4bc7/B is initiating minor compaction (all files) 2024-12-10T15:37:42,589 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cd3195b888bdde70b3568541344b4bc7/B in TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
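Note on the "total limit is 50.00 MB/second" lines from PressureAwareThroughputController: under low compaction pressure the controller throttles near its lower throughput bound and only ramps toward the higher bound as pressure grows, so 50 MB/s here appears to be the stock lower bound. The snippet below is only a sketch of how those bounds could be tuned; the property names are the standard compaction-throughput keys, while the chosen values are illustrative, not recommendations.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputTuning {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // With no compaction pressure the controller throttles at the lower bound,
        // which matches the "total limit is 50.00 MB/second" figures in the log.
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        System.out.println("lower=" + conf.getLong("hbase.hstore.compaction.throughput.lower.bound", 0)
            + " higher=" + conf.getLong("hbase.hstore.compaction.throughput.higher.bound", 0));
      }
    }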
2024-12-10T15:37:42,589 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/0cebd1d4455b4553b5c553fce398dbc7, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/6989225ab14945e8b711953bc996a138, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/611e26daddc347629c84107230d22776, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/30cba97737ad4f32a3cdcf902cf58c87, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/983060ace085499183638bd9e0f28ef3] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp, totalSize=60.1 K 2024-12-10T15:37:42,590 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 0cebd1d4455b4553b5c553fce398dbc7, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1733845055294 2024-12-10T15:37:42,590 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 6989225ab14945e8b711953bc996a138, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1733845056042 2024-12-10T15:37:42,590 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 611e26daddc347629c84107230d22776, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733845058445 2024-12-10T15:37:42,590 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 30cba97737ad4f32a3cdcf902cf58c87, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1733845058637 2024-12-10T15:37:42,591 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 983060ace085499183638bd9e0f28ef3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1733845060831 2024-12-10T15:37:42,601 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cd3195b888bdde70b3568541344b4bc7#B#compaction#390 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:42,602 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/65f3495ae0ac42318e3b88c3645dad2d is 50, key is test_row_0/B:col10/1733845061178/Put/seqid=0 2024-12-10T15:37:42,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742288_1464 (size=31903) 2024-12-10T15:37:42,611 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/e2979f5bb0734fbb93daaad4119de694 2024-12-10T15:37:42,649 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/f02d002975e848dd9fb798ffd1c79163 is 50, key is test_row_0/C:col10/1733845061368/Put/seqid=0 2024-12-10T15:37:42,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-10T15:37:42,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742289_1465 (size=12949) 2024-12-10T15:37:42,660 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:42,665 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-10T15:37:42,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:42,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:42,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:42,665 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:42,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:42,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:42,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742290_1466 (size=12301) 2024-12-10T15:37:42,739 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:42,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845122734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:42,748 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:42,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845122743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:42,817 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:42,818 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-10T15:37:42,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
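Note on the client side of these RegionTooBusyException responses: the server answers each Mutate call with the exception while the region is over its blocking memstore size, and the caller is expected to back off and retry once the in-flight flush drains the memstore. The stock client already does this internally (governed by hbase.client.retries.number and hbase.client.pause); the sketch below just makes that backoff explicit for a single Put against the table, family, and qualifier named in the log, with illustrative row value and backoff numbers.

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionBackoff {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100L; // illustrative starting backoff
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put); // the stock client retries internally before failing
              return;         // write accepted
            } catch (IOException busy) {
              // Typically a retries-exhausted exception whose cause is the
              // RegionTooBusyException logged above; wait for the flush to drain
              // the memstore, then try again with a longer pause.
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
        }
      }
    }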
2024-12-10T15:37:42,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:42,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:42,818 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:42,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:37:42,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:42,979 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:42,980 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-10T15:37:42,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:42,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
as already flushing 2024-12-10T15:37:42,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:42,983 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:42,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:42,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:37:43,035 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/5fabf8c6d0054833b35200b07498922c as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/5fabf8c6d0054833b35200b07498922c 2024-12-10T15:37:43,074 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in cd3195b888bdde70b3568541344b4bc7/A of cd3195b888bdde70b3568541344b4bc7 into 5fabf8c6d0054833b35200b07498922c(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 1sec to execute. 
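The "Completed compaction of 5 (all) file(s) in cd3195b888bdde70b3568541344b4bc7/A" entry above records a compaction the region server scheduled on its own. For reference, a compaction of the same table can also be requested explicitly through the public Admin API. The sketch below is illustrative only and not part of this log: the class name is made up, and it assumes connection settings come from an hbase-site.xml on the classpath.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactTableSketch {
    public static void main(String[] args) throws Exception {
        // Connection settings are assumed to come from an hbase-site.xml on the classpath.
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            admin.compact(table);       // queue a minor compaction for each store, like the ones logged above
            admin.majorCompact(table);  // or request a major compaction that rewrites all store files
        }
    }
}

Both calls only queue the request; the region server's CompactSplit threads (the shortCompactions/longCompactions workers seen above) perform the actual rewrite asynchronously.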
2024-12-10T15:37:43,074 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:43,074 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7., storeName=cd3195b888bdde70b3568541344b4bc7/A, priority=11, startTime=1733845062020; duration=1sec 2024-12-10T15:37:43,074 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/65f3495ae0ac42318e3b88c3645dad2d as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/65f3495ae0ac42318e3b88c3645dad2d 2024-12-10T15:37:43,074 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:43,074 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cd3195b888bdde70b3568541344b4bc7:A 2024-12-10T15:37:43,076 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/f02d002975e848dd9fb798ffd1c79163 2024-12-10T15:37:43,080 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in cd3195b888bdde70b3568541344b4bc7/B of cd3195b888bdde70b3568541344b4bc7 into 65f3495ae0ac42318e3b88c3645dad2d(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
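The MemStoreFlusher entries above show the flushed store files for A, B and C being committed, while pid=113 is the master-driven FlushRegionProcedure retrying the same region. A table-wide flush like the one behind pid=112/113 can be requested with Admin.flush. A minimal sketch, illustrative only: the class name is made up and connection settings are assumed to come from an hbase-site.xml on the classpath.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush every region of the table; the master then drives
            // FlushTableProcedure/FlushRegionProcedure work like pid=112/113 in this log.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}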
2024-12-10T15:37:43,080 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:43,080 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7., storeName=cd3195b888bdde70b3568541344b4bc7/B, priority=11, startTime=1733845062020; duration=0sec 2024-12-10T15:37:43,080 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:43,080 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cd3195b888bdde70b3568541344b4bc7:B 2024-12-10T15:37:43,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/db3744b37db442c5888123af2c79d182 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/db3744b37db442c5888123af2c79d182 2024-12-10T15:37:43,087 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/db3744b37db442c5888123af2c79d182, entries=200, sequenceid=311, filesize=39.0 K 2024-12-10T15:37:43,088 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/e2979f5bb0734fbb93daaad4119de694 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/e2979f5bb0734fbb93daaad4119de694 2024-12-10T15:37:43,091 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/e2979f5bb0734fbb93daaad4119de694, entries=150, sequenceid=311, filesize=12.0 K 2024-12-10T15:37:43,092 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/f02d002975e848dd9fb798ffd1c79163 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/f02d002975e848dd9fb798ffd1c79163 2024-12-10T15:37:43,098 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/f02d002975e848dd9fb798ffd1c79163, entries=150, sequenceid=311, filesize=12.0 K 2024-12-10T15:37:43,099 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for cd3195b888bdde70b3568541344b4bc7 in 1073ms, sequenceid=311, 
compaction requested=false 2024-12-10T15:37:43,099 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:43,136 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:43,139 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-10T15:37:43,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:43,143 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing cd3195b888bdde70b3568541344b4bc7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T15:37:43,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=A 2024-12-10T15:37:43,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:43,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=B 2024-12-10T15:37:43,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:43,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=C 2024-12-10T15:37:43,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:43,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121088b54f50bfda46d483c2bba751046a8a_cd3195b888bdde70b3568541344b4bc7 is 50, key is test_row_0/A:col10/1733845062083/Put/seqid=0 2024-12-10T15:37:43,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742291_1467 (size=12454) 2024-12-10T15:37:43,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:43,235 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121088b54f50bfda46d483c2bba751046a8a_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121088b54f50bfda46d483c2bba751046a8a_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:43,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/d42e95e3b00d4589894c526c7f6afcb5, store: [table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:43,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/d42e95e3b00d4589894c526c7f6afcb5 is 175, key is test_row_0/A:col10/1733845062083/Put/seqid=0 2024-12-10T15:37:43,243 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. as already flushing 2024-12-10T15:37:43,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:43,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742292_1468 (size=31255) 2024-12-10T15:37:43,287 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=325, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/d42e95e3b00d4589894c526c7f6afcb5 2024-12-10T15:37:43,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/6c27e04835594f21ac3c136913f1c24a is 50, key is test_row_0/B:col10/1733845062083/Put/seqid=0 2024-12-10T15:37:43,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742293_1469 (size=12301) 2024-12-10T15:37:43,355 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:43,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845123336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:43,356 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:43,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35082 deadline: 1733845123343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:43,359 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/6c27e04835594f21ac3c136913f1c24a 2024-12-10T15:37:43,362 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:43,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845123347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:43,367 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:43,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35044 deadline: 1733845123352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:43,370 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:43,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35012 deadline: 1733845123362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:43,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/959d542454f748a898744cc460e34b29 is 50, key is test_row_0/C:col10/1733845062083/Put/seqid=0 2024-12-10T15:37:43,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742294_1470 (size=12301) 2024-12-10T15:37:43,467 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:43,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845123457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:43,468 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:43,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35082 deadline: 1733845123457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:43,476 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:43,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35044 deadline: 1733845123471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:43,477 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:43,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845123473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:43,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:43,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35012 deadline: 1733845123479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:43,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-10T15:37:43,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:43,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35082 deadline: 1733845123679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:43,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:43,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845123679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:43,694 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:43,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35012 deadline: 1733845123685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:43,694 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:43,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35044 deadline: 1733845123685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:43,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:43,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845123687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:43,864 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/959d542454f748a898744cc460e34b29 2024-12-10T15:37:43,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/d42e95e3b00d4589894c526c7f6afcb5 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/d42e95e3b00d4589894c526c7f6afcb5 2024-12-10T15:37:43,877 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/d42e95e3b00d4589894c526c7f6afcb5, entries=150, sequenceid=325, filesize=30.5 K 2024-12-10T15:37:43,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/6c27e04835594f21ac3c136913f1c24a as 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/6c27e04835594f21ac3c136913f1c24a 2024-12-10T15:37:43,882 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/6c27e04835594f21ac3c136913f1c24a, entries=150, sequenceid=325, filesize=12.0 K 2024-12-10T15:37:43,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/959d542454f748a898744cc460e34b29 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/959d542454f748a898744cc460e34b29 2024-12-10T15:37:43,887 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/959d542454f748a898744cc460e34b29, entries=150, sequenceid=325, filesize=12.0 K 2024-12-10T15:37:43,891 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for cd3195b888bdde70b3568541344b4bc7 in 747ms, sequenceid=325, compaction requested=true 2024-12-10T15:37:43,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:43,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
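The repeated RegionTooBusyException warnings above come from HRegion.checkResources rejecting writes while the region's memstore is above its blocking limit (512.0 K here; in HBase this limit is normally hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, so the small value suggests a test-sized flush size). The client normally retries these internally; the sketch below shows an explicit catch-and-back-off around a single Put, reusing the row, family and qualifier seen in this log. It is illustrative only: the class name, value, retry cap and back-off figures are assumptions.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Row, family and qualifier taken from the log entries above.
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some value"));
            long backoffMs = 100;                 // assumed starting back-off
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);               // the client also retries internally (hbase.client.retries.number)
                    break;
                } catch (RegionTooBusyException e) {
                    if (attempt >= 5) {           // assumed cap on extra attempts
                        throw e;
                    }
                    Thread.sleep(backoffMs);      // back off so the in-flight flush can drain the memstore
                    backoffMs *= 2;
                }
            }
        }
    }
}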
2024-12-10T15:37:43,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-12-10T15:37:43,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-12-10T15:37:43,902 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-12-10T15:37:43,902 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3540 sec 2024-12-10T15:37:43,904 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 2.3640 sec 2024-12-10T15:37:43,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:43,991 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cd3195b888bdde70b3568541344b4bc7 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-10T15:37:43,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=A 2024-12-10T15:37:43,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:43,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=B 2024-12-10T15:37:43,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:43,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=C 2024-12-10T15:37:43,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:44,008 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210055e1ec3ed9e488ebb749476510a0e0d_cd3195b888bdde70b3568541344b4bc7 is 50, key is test_row_0/A:col10/1733845063328/Put/seqid=0 2024-12-10T15:37:44,016 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:44,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35082 deadline: 1733845124008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:44,016 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:44,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845124008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:44,017 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:44,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845124009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:44,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742295_1471 (size=14994) 2024-12-10T15:37:44,022 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:44,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35044 deadline: 1733845124016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:44,027 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:44,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35012 deadline: 1733845124019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:44,124 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:44,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845124118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:44,125 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:44,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:44,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845124118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:44,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35082 deadline: 1733845124118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:44,136 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:44,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35044 deadline: 1733845124129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:44,147 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:44,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35012 deadline: 1733845124140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:44,332 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:44,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845124326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:44,333 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:44,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845124326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:44,333 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:44,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35082 deadline: 1733845124327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:44,342 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:44,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35044 deadline: 1733845124338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:44,366 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:44,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35012 deadline: 1733845124359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:44,420 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:44,437 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210055e1ec3ed9e488ebb749476510a0e0d_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210055e1ec3ed9e488ebb749476510a0e0d_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:44,443 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/b156f85167a449c2b1a2fcf4f742f0eb, store: [table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:44,444 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/b156f85167a449c2b1a2fcf4f742f0eb is 175, key is test_row_0/A:col10/1733845063328/Put/seqid=0 2024-12-10T15:37:44,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742296_1472 (size=39949) 2024-12-10T15:37:44,640 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:44,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845124636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:44,641 DEBUG [Thread-1728 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x47679076 to 127.0.0.1:56346 2024-12-10T15:37:44,641 DEBUG [Thread-1728 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:37:44,645 DEBUG [Thread-1730 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4cb9e50e to 127.0.0.1:56346 2024-12-10T15:37:44,645 DEBUG [Thread-1730 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:37:44,645 DEBUG [Thread-1726 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7819b9e2 to 127.0.0.1:56346 2024-12-10T15:37:44,645 DEBUG [Thread-1726 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:37:44,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:44,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35082 deadline: 1733845124639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:44,646 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:44,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845124642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:44,653 DEBUG [Thread-1722 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c907e21 to 127.0.0.1:56346 2024-12-10T15:37:44,653 DEBUG [Thread-1722 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:37:44,660 DEBUG [Thread-1724 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x61ec0f48 to 127.0.0.1:56346 2024-12-10T15:37:44,660 DEBUG [Thread-1724 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:37:44,660 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:44,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35044 deadline: 1733845124659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:44,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:44,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35012 deadline: 1733845124676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:44,874 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=352, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/b156f85167a449c2b1a2fcf4f742f0eb 2024-12-10T15:37:44,913 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/ccfc34aade5441c88c5eebe4e17050ae is 50, key is test_row_0/B:col10/1733845063328/Put/seqid=0 2024-12-10T15:37:44,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742297_1473 (size=12301) 2024-12-10T15:37:44,939 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=352 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/ccfc34aade5441c88c5eebe4e17050ae 2024-12-10T15:37:44,957 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/5ec7fcd8c50e42c7b48afbbb47278458 is 50, key is test_row_0/C:col10/1733845063328/Put/seqid=0 2024-12-10T15:37:44,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742298_1474 (size=12301) 2024-12-10T15:37:45,150 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:45,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35082 deadline: 1733845125149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:45,155 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:45,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35060 deadline: 1733845125152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:45,156 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:45,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35026 deadline: 1733845125156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:45,176 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:45,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35044 deadline: 1733845125175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:45,181 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:37:45,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35012 deadline: 1733845125181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:45,379 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=352 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/5ec7fcd8c50e42c7b48afbbb47278458 2024-12-10T15:37:45,386 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/b156f85167a449c2b1a2fcf4f742f0eb as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/b156f85167a449c2b1a2fcf4f742f0eb 2024-12-10T15:37:45,395 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/b156f85167a449c2b1a2fcf4f742f0eb, entries=200, sequenceid=352, filesize=39.0 K 2024-12-10T15:37:45,397 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/ccfc34aade5441c88c5eebe4e17050ae as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/ccfc34aade5441c88c5eebe4e17050ae 2024-12-10T15:37:45,401 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/ccfc34aade5441c88c5eebe4e17050ae, entries=150, sequenceid=352, filesize=12.0 K 2024-12-10T15:37:45,407 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/5ec7fcd8c50e42c7b48afbbb47278458 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/5ec7fcd8c50e42c7b48afbbb47278458 2024-12-10T15:37:45,421 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/5ec7fcd8c50e42c7b48afbbb47278458, entries=150, sequenceid=352, filesize=12.0 K 2024-12-10T15:37:45,422 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for cd3195b888bdde70b3568541344b4bc7 in 1431ms, sequenceid=352, compaction requested=true 2024-12-10T15:37:45,422 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:45,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cd3195b888bdde70b3568541344b4bc7:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:37:45,423 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:37:45,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:45,423 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:37:45,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cd3195b888bdde70b3568541344b4bc7:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:37:45,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:45,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cd3195b888bdde70b3568541344b4bc7:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:37:45,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:45,425 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:37:45,425 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): cd3195b888bdde70b3568541344b4bc7/B is initiating minor 
compaction (all files) 2024-12-10T15:37:45,425 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cd3195b888bdde70b3568541344b4bc7/B in TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:45,425 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/65f3495ae0ac42318e3b88c3645dad2d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/e2979f5bb0734fbb93daaad4119de694, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/6c27e04835594f21ac3c136913f1c24a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/ccfc34aade5441c88c5eebe4e17050ae] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp, totalSize=48.7 K 2024-12-10T15:37:45,426 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 65f3495ae0ac42318e3b88c3645dad2d, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1733845060831 2024-12-10T15:37:45,426 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting e2979f5bb0734fbb93daaad4119de694, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1733845061315 2024-12-10T15:37:45,426 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c27e04835594f21ac3c136913f1c24a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1733845062072 2024-12-10T15:37:45,426 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting ccfc34aade5441c88c5eebe4e17050ae, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1733845063328 2024-12-10T15:37:45,432 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cd3195b888bdde70b3568541344b4bc7#B#compaction#398 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:45,433 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/779695705ff04bfea4c78d8017ae545b is 50, key is test_row_0/B:col10/1733845063328/Put/seqid=0 2024-12-10T15:37:45,433 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 143056 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:37:45,433 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): cd3195b888bdde70b3568541344b4bc7/A is initiating minor compaction (all files) 2024-12-10T15:37:45,433 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cd3195b888bdde70b3568541344b4bc7/A in TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:45,433 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/5fabf8c6d0054833b35200b07498922c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/db3744b37db442c5888123af2c79d182, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/d42e95e3b00d4589894c526c7f6afcb5, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/b156f85167a449c2b1a2fcf4f742f0eb] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp, totalSize=139.7 K 2024-12-10T15:37:45,433 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:45,434 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
files: [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/5fabf8c6d0054833b35200b07498922c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/db3744b37db442c5888123af2c79d182, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/d42e95e3b00d4589894c526c7f6afcb5, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/b156f85167a449c2b1a2fcf4f742f0eb] 2024-12-10T15:37:45,434 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5fabf8c6d0054833b35200b07498922c, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1733845060831 2024-12-10T15:37:45,434 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting db3744b37db442c5888123af2c79d182, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1733845061315 2024-12-10T15:37:45,434 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting d42e95e3b00d4589894c526c7f6afcb5, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1733845062072 2024-12-10T15:37:45,435 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting b156f85167a449c2b1a2fcf4f742f0eb, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1733845063328 2024-12-10T15:37:45,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742299_1475 (size=13085) 2024-12-10T15:37:45,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-10T15:37:45,656 INFO [Thread-1721 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-12-10T15:37:46,172 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:46,174 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210e5b7117b2b274e18b1a4905a3b25376c_cd3195b888bdde70b3568541344b4bc7 store=[table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:46,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:46,176 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cd3195b888bdde70b3568541344b4bc7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T15:37:46,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=A 2024-12-10T15:37:46,176 DEBUG [Thread-1711 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x62f74604 to 127.0.0.1:56346 2024-12-10T15:37:46,176 DEBUG [Thread-1711 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:37:46,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:46,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=B 2024-12-10T15:37:46,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:46,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=C 2024-12-10T15:37:46,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:46,177 DEBUG [Thread-1715 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c54a0d3 to 127.0.0.1:56346 2024-12-10T15:37:46,177 DEBUG [Thread-1715 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:37:46,178 DEBUG [Thread-1713 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x49e13594 to 127.0.0.1:56346 2024-12-10T15:37:46,178 DEBUG [Thread-1713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:37:46,186 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/779695705ff04bfea4c78d8017ae545b as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/779695705ff04bfea4c78d8017ae545b 2024-12-10T15:37:46,191 DEBUG [Thread-1717 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3875c8c5 to 127.0.0.1:56346 2024-12-10T15:37:46,191 DEBUG [Thread-1717 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:37:46,194 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210db006e7255b0411f8411209355e4207b_cd3195b888bdde70b3568541344b4bc7 is 50, key is test_row_0/A:col10/1733845064005/Put/seqid=0 2024-12-10T15:37:46,199 DEBUG [Thread-1719 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0801ba40 to 127.0.0.1:56346 2024-12-10T15:37:46,199 DEBUG [Thread-1719 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:37:46,200 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-12-10T15:37:46,200 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 10
2024-12-10T15:37:46,200 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 120
2024-12-10T15:37:46,200 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 123
2024-12-10T15:37:46,200 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 21
2024-12-10T15:37:46,200 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 16
2024-12-10T15:37:46,200 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-12-10T15:37:46,200 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-12-10T15:37:46,200 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1251
2024-12-10T15:37:46,200 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3753 rows
2024-12-10T15:37:46,200 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1262
2024-12-10T15:37:46,200 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3786 rows
2024-12-10T15:37:46,200 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1259
2024-12-10T15:37:46,200 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3777 rows
2024-12-10T15:37:46,200 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1257
2024-12-10T15:37:46,200 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3771 rows
2024-12-10T15:37:46,200 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1249
2024-12-10T15:37:46,200 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3744 rows
2024-12-10T15:37:46,200 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-10T15:37:46,200 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x75b14fbd to 127.0.0.1:56346
2024-12-10T15:37:46,200 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-10T15:37:46,204 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-12-10T15:37:46,204 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-12-10T15:37:46,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-12-10T15:37:46,207 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cd3195b888bdde70b3568541344b4bc7/B of cd3195b888bdde70b3568541344b4bc7 into 779695705ff04bfea4c78d8017ae545b(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute.
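The scanner summary above is internally consistent: every scanner reports a scan count and a verified-row count, and the verified count is exactly three times the scan count in each case (1251/3753, 1262/3786, 1259/3777, 1257/3771, 1249/3744), which lines up with the three column families (A, B, C) this test writes to. The following is a minimal Python sketch of that consistency check with the counts copied from the log; the helper and the factor of 3 are illustrative assumptions, not part of AcidGuaranteesTestTool.

import re

# Scanner summary entries as printed in the log above.
LOG = """
hbase.AcidGuaranteesTestTool(402): scanned 1251
hbase.AcidGuaranteesTestTool(403): verified 3753 rows
hbase.AcidGuaranteesTestTool(402): scanned 1262
hbase.AcidGuaranteesTestTool(403): verified 3786 rows
hbase.AcidGuaranteesTestTool(402): scanned 1259
hbase.AcidGuaranteesTestTool(403): verified 3777 rows
hbase.AcidGuaranteesTestTool(402): scanned 1257
hbase.AcidGuaranteesTestTool(403): verified 3771 rows
hbase.AcidGuaranteesTestTool(402): scanned 1249
hbase.AcidGuaranteesTestTool(403): verified 3744 rows
"""

scans = [int(n) for n in re.findall(r"scanned (\d+)", LOG)]
rows = [int(n) for n in re.findall(r"verified (\d+) rows", LOG)]

# Assumption for illustration: 3 column families, so each scanned row yields 3 verified rows.
for s, r in zip(scans, rows):
    assert r == 3 * s, (s, r)
print("scanner counts consistent:", list(zip(scans, rows)))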
2024-12-10T15:37:46,207 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:46,207 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7., storeName=cd3195b888bdde70b3568541344b4bc7/B, priority=12, startTime=1733845065423; duration=0sec 2024-12-10T15:37:46,208 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:37:46,208 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cd3195b888bdde70b3568541344b4bc7:B 2024-12-10T15:37:46,208 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:37:46,209 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733845066209"}]},"ts":"1733845066209"} 2024-12-10T15:37:46,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-10T15:37:46,214 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-10T15:37:46,221 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:37:46,221 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): cd3195b888bdde70b3568541344b4bc7/C is initiating minor compaction (all files) 2024-12-10T15:37:46,221 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cd3195b888bdde70b3568541344b4bc7/C in TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
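The size figures in the store-A selection logged a little earlier agree with each other: the ExploringCompactionPolicy line reports 4 files of size 143056 bytes, the HStore line reports totalSize=139.7 K, and the four Compactor entries list 31.2 K, 39.0 K, 30.5 K and 39.0 K. A small Python sketch of that arithmetic, with the values copied from the log; the one-decimal KiB rounding is an assumption about how the sizes were formatted.

# Candidate sizes from the Compactor(224) entries for store A, in KiB as printed.
candidate_sizes_k = [31.2, 39.0, 30.5, 39.0]

selected_bytes = 143056    # "selected 4 files of size 143056"
reported_total_k = 139.7   # "totalSize=139.7 K"

# The per-file sizes sum to the reported total, and the raw byte count is the same figure in KiB.
assert round(sum(candidate_sizes_k), 1) == reported_total_k
assert round(selected_bytes / 1024, 1) == reported_total_k
print("store A compaction selection sizes are consistent")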
2024-12-10T15:37:46,221 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/0e8ebdb5590e4883bdb217ea582d8254, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/f02d002975e848dd9fb798ffd1c79163, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/959d542454f748a898744cc460e34b29, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/5ec7fcd8c50e42c7b48afbbb47278458] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp, totalSize=48.7 K 2024-12-10T15:37:46,221 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e8ebdb5590e4883bdb217ea582d8254, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1733845060831 2024-12-10T15:37:46,222 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting f02d002975e848dd9fb798ffd1c79163, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1733845061315 2024-12-10T15:37:46,222 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 959d542454f748a898744cc460e34b29, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1733845062072 2024-12-10T15:37:46,228 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ec7fcd8c50e42c7b48afbbb47278458, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1733845063328 2024-12-10T15:37:46,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742300_1476 (size=12454) 2024-12-10T15:37:46,299 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210e5b7117b2b274e18b1a4905a3b25376c_cd3195b888bdde70b3568541344b4bc7, store=[table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:46,299 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210e5b7117b2b274e18b1a4905a3b25376c_cd3195b888bdde70b3568541344b4bc7 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:46,312 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cd3195b888bdde70b3568541344b4bc7#C#compaction#401 average throughput is 0.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:46,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-10T15:37:46,312 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/dc3a5b6cf55d491dbf257359d592b2b6 is 50, key is test_row_0/C:col10/1733845063328/Put/seqid=0 2024-12-10T15:37:46,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742301_1477 (size=4469) 2024-12-10T15:37:46,321 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cd3195b888bdde70b3568541344b4bc7#A#compaction#399 average throughput is 0.17 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:37:46,322 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/54d37547fdf14be28b3df3707acd6c7c is 175, key is test_row_0/A:col10/1733845063328/Put/seqid=0 2024-12-10T15:37:46,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742302_1478 (size=13085) 2024-12-10T15:37:46,349 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/dc3a5b6cf55d491dbf257359d592b2b6 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/dc3a5b6cf55d491dbf257359d592b2b6 2024-12-10T15:37:46,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742303_1479 (size=32039) 2024-12-10T15:37:46,355 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cd3195b888bdde70b3568541344b4bc7/C of cd3195b888bdde70b3568541344b4bc7 into dc3a5b6cf55d491dbf257359d592b2b6(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
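The throttle.PressureAwareThroughputController(145) entries above pack several values into one sentence: average throughput, how many times the compaction slept, total sleep time, the number of active operations, and the configured limit. If those values are needed programmatically, a regular expression written against the exact wording seen in this log can pull them out; this is only a parsing sketch for this log format and may not match other HBase versions.

import re

line = ("cd3195b888bdde70b3568541344b4bc7#C#compaction#401 average throughput is "
        "0.28 MB/second, slept 0 time(s) and total slept time is 0 ms. "
        "1 active operations remaining, total limit is 50.00 MB/second")

# Pattern follows the wording of the throughput-controller entries in this log.
pattern = re.compile(
    r"average throughput is (?P<avg>[\d.]+) MB/second, "
    r"slept (?P<sleeps>\d+) time\(s\) and total slept time is (?P<slept_ms>\d+) ms\. "
    r"(?P<active>\d+) active operations remaining, "
    r"total limit is (?P<limit>[\d.]+) MB/second"
)

m = pattern.search(line)
print(m.groupdict())
# {'avg': '0.28', 'sleeps': '0', 'slept_ms': '0', 'active': '1', 'limit': '50.00'}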
2024-12-10T15:37:46,355 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:46,355 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7., storeName=cd3195b888bdde70b3568541344b4bc7/C, priority=12, startTime=1733845065424; duration=0sec 2024-12-10T15:37:46,355 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:46,355 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cd3195b888bdde70b3568541344b4bc7:C 2024-12-10T15:37:46,366 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/54d37547fdf14be28b3df3707acd6c7c as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/54d37547fdf14be28b3df3707acd6c7c 2024-12-10T15:37:46,370 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cd3195b888bdde70b3568541344b4bc7/A of cd3195b888bdde70b3568541344b4bc7 into 54d37547fdf14be28b3df3707acd6c7c(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:37:46,370 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:46,370 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7., storeName=cd3195b888bdde70b3568541344b4bc7/A, priority=12, startTime=1733845065422; duration=0sec 2024-12-10T15:37:46,370 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:37:46,370 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cd3195b888bdde70b3568541344b4bc7:A 2024-12-10T15:37:46,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-10T15:37:46,696 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:46,704 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210db006e7255b0411f8411209355e4207b_cd3195b888bdde70b3568541344b4bc7 to 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210db006e7255b0411f8411209355e4207b_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:46,707 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/33483552fa6943fdb3508a47ca2f32dc, store: [table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:46,708 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/33483552fa6943fdb3508a47ca2f32dc is 175, key is test_row_0/A:col10/1733845064005/Put/seqid=0 2024-12-10T15:37:46,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742304_1480 (size=31255) 2024-12-10T15:37:46,738 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=364, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/33483552fa6943fdb3508a47ca2f32dc 2024-12-10T15:37:46,748 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/3a9fc3fe5e4c46b8abbc621a999b6a30 is 50, key is test_row_0/B:col10/1733845064005/Put/seqid=0 2024-12-10T15:37:46,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742305_1481 (size=12301) 2024-12-10T15:37:46,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-10T15:37:46,887 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-10T15:37:46,888 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-10T15:37:46,889 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=cd3195b888bdde70b3568541344b4bc7, UNASSIGN}] 2024-12-10T15:37:46,890 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=cd3195b888bdde70b3568541344b4bc7, UNASSIGN 2024-12-10T15:37:46,891 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=cd3195b888bdde70b3568541344b4bc7, regionState=CLOSING, regionLocation=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:46,892 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T15:37:46,892 INFO [PEWorker-4 
{}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; CloseRegionProcedure cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049}] 2024-12-10T15:37:47,047 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:37:47,055 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] handler.UnassignRegionHandler(124): Close cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:47,055 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T15:37:47,055 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HRegion(1681): Closing cd3195b888bdde70b3568541344b4bc7, disabling compactions & flushes 2024-12-10T15:37:47,055 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:47,155 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/3a9fc3fe5e4c46b8abbc621a999b6a30 2024-12-10T15:37:47,164 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/903f69735f52421da2ff3d1d80e51b70 is 50, key is test_row_0/C:col10/1733845064005/Put/seqid=0 2024-12-10T15:37:47,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742306_1482 (size=12301) 2024-12-10T15:37:47,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-10T15:37:47,575 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/903f69735f52421da2ff3d1d80e51b70 2024-12-10T15:37:47,580 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/33483552fa6943fdb3508a47ca2f32dc as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/33483552fa6943fdb3508a47ca2f32dc 2024-12-10T15:37:47,583 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/33483552fa6943fdb3508a47ca2f32dc, entries=150, sequenceid=364, filesize=30.5 K 2024-12-10T15:37:47,584 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/3a9fc3fe5e4c46b8abbc621a999b6a30 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/3a9fc3fe5e4c46b8abbc621a999b6a30 2024-12-10T15:37:47,587 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/3a9fc3fe5e4c46b8abbc621a999b6a30, entries=150, sequenceid=364, filesize=12.0 K 2024-12-10T15:37:47,587 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/903f69735f52421da2ff3d1d80e51b70 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/903f69735f52421da2ff3d1d80e51b70 2024-12-10T15:37:47,590 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/903f69735f52421da2ff3d1d80e51b70, entries=150, sequenceid=364, filesize=12.0 K 2024-12-10T15:37:47,590 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=13.42 KB/13740 for cd3195b888bdde70b3568541344b4bc7 in 1414ms, sequenceid=364, compaction requested=false 2024-12-10T15:37:47,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:47,590 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:47,590 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:47,590 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. after waiting 0 ms 2024-12-10T15:37:47,590 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 
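The "Finished flush" entry above reports each metric twice, as a rounded KB figure and as raw bytes (dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=13.42 KB/13740). The rounded figures are the byte counts divided by 1024 and rounded to two decimals. A tiny Python check of that relationship, assuming that simple conversion rather than quoting HBase's own formatting code:

def to_kb(n_bytes: int) -> float:
    """Bytes -> KiB rounded to two decimals, matching the figures in the flush log entry."""
    return round(n_bytes / 1024, 2)

# Values copied from the "Finished flush" entry above.
assert to_kb(61830) == 60.38    # dataSize
assert to_kb(162720) == 158.91  # heapSize
assert to_kb(13740) == 13.42    # currentSize
print("flush metrics consistent")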
2024-12-10T15:37:47,590 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HRegion(2837): Flushing cd3195b888bdde70b3568541344b4bc7 3/3 column families, dataSize=13.42 KB heapSize=35.91 KB 2024-12-10T15:37:47,591 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=A 2024-12-10T15:37:47,591 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:47,591 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=B 2024-12-10T15:37:47,591 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:47,591 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cd3195b888bdde70b3568541344b4bc7, store=C 2024-12-10T15:37:47,591 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:37:47,594 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210d1d11f7fb42b44849b344a83f630a242_cd3195b888bdde70b3568541344b4bc7 is 50, key is test_row_1/A:col10/1733845066191/Put/seqid=0 2024-12-10T15:37:47,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742307_1483 (size=9914) 2024-12-10T15:37:47,997 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:37:48,002 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210d1d11f7fb42b44849b344a83f630a242_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210d1d11f7fb42b44849b344a83f630a242_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:48,003 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/8c26c3d078324f699e696ea28024c8a3, store: [table=TestAcidGuarantees family=A region=cd3195b888bdde70b3568541344b4bc7] 2024-12-10T15:37:48,004 DEBUG 
[RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/8c26c3d078324f699e696ea28024c8a3 is 175, key is test_row_1/A:col10/1733845066191/Put/seqid=0 2024-12-10T15:37:48,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742308_1484 (size=22561) 2024-12-10T15:37:48,011 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=372, memsize=4.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/8c26c3d078324f699e696ea28024c8a3 2024-12-10T15:37:48,183 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/71f690835bef4cc7a0c156ed63abdc75 is 50, key is test_row_1/B:col10/1733845066191/Put/seqid=0 2024-12-10T15:37:48,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-10T15:37:50,246 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-10T15:37:50,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-10T15:37:54,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742309_1485 (size=9857) 2024-12-10T15:37:54,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-10T15:37:54,504 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/71f690835bef4cc7a0c156ed63abdc75 2024-12-10T15:37:54,537 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/b72be756dbca463e844e475b70b59683 is 50, key is test_row_1/C:col10/1733845066191/Put/seqid=0 2024-12-10T15:37:54,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742310_1486 (size=9857) 2024-12-10T15:37:54,942 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/b72be756dbca463e844e475b70b59683 2024-12-10T15:37:54,947 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/A/8c26c3d078324f699e696ea28024c8a3 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/8c26c3d078324f699e696ea28024c8a3 2024-12-10T15:37:54,949 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/8c26c3d078324f699e696ea28024c8a3, entries=100, sequenceid=372, filesize=22.0 K 2024-12-10T15:37:54,950 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/B/71f690835bef4cc7a0c156ed63abdc75 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/71f690835bef4cc7a0c156ed63abdc75 2024-12-10T15:37:54,953 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/71f690835bef4cc7a0c156ed63abdc75, entries=100, sequenceid=372, filesize=9.6 K 2024-12-10T15:37:54,954 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/.tmp/C/b72be756dbca463e844e475b70b59683 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/b72be756dbca463e844e475b70b59683 2024-12-10T15:37:54,957 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/b72be756dbca463e844e475b70b59683, entries=100, sequenceid=372, filesize=9.6 K 2024-12-10T15:37:54,958 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~13.42 KB/13740, heapSize ~35.86 KB/36720, currentSize=0 B/0 for cd3195b888bdde70b3568541344b4bc7 in 7367ms, sequenceid=372, compaction requested=true 2024-12-10T15:37:54,959 DEBUG [StoreCloser-TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/4d361a4415a34e0fbe4b25ba381fc22b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/e8374c8ab6e545d0a116689ccbfb9141, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/90c1ee844dfb4fccadf971c63ec6daa2, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/e705c0834bd4407ea0fb056cb1ae402a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/12f0edb366034fa4ae005dfc4f58a7bb, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/7cd505af8c0f472b8daf0252fc614324, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/63a6ba9588414943a251bc969baaadde, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/618bf03de2d343a6b830ed119e01f172, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/6ae0843f0c9740cd9fbe18655e2a169b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/30a45ebd830b46c399a5d919b10dbf92, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/bf0937f134364a2698b5b9330be28e95, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/fe812ad9c6c94fe2a11d163576c2fd2c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/44ae908830524e5e915dd2c422cc6f60, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/e4ee3fdfe22e47499d9a1c7cc2546281, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/3b7d9001df564b1fb7ca657dcf29db46, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/5b8606eb6fec4369a2903b79a6b1fe03, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/0c509aef24464142ae57711a67904af8, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/6d39fe95d2ac472e898025293979f679, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/5fabf8c6d0054833b35200b07498922c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/a46656a2003d4e46acd17adde7c174e5, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/db3744b37db442c5888123af2c79d182, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/d42e95e3b00d4589894c526c7f6afcb5, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/b156f85167a449c2b1a2fcf4f742f0eb] to archive 2024-12-10T15:37:54,962 DEBUG [StoreCloser-TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
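Every HFileArchiver(620) entry that follows moves a store file from .../<test-data-id>/data/default/<table>/<region>/<family>/<file> to the same relative path under .../<test-data-id>/archive/data/default/.... The sketch below reproduces that source-to-destination mapping at the string level only; the real HFileArchiver derives the archive directory from the cluster configuration, so this is purely illustrative.

def archive_path(store_file: str) -> str:
    """Mirror the pattern in the HFileArchiver(620) entries below:
    insert 'archive/' in front of the 'data/default/...' portion of the path.
    Assumption: the path contains exactly one '/data/default/' segment, as in this log."""
    return store_file.replace("/data/default/", "/archive/data/default/", 1)

# Example source path copied from one of the entries below.
src = ("hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935"
       "/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/e8374c8ab6e545d0a116689ccbfb9141")
print(archive_path(src))
# .../test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/...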
2024-12-10T15:37:54,967 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/e8374c8ab6e545d0a116689ccbfb9141 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/e8374c8ab6e545d0a116689ccbfb9141 2024-12-10T15:37:54,976 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/4d361a4415a34e0fbe4b25ba381fc22b to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/4d361a4415a34e0fbe4b25ba381fc22b 2024-12-10T15:37:55,072 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/90c1ee844dfb4fccadf971c63ec6daa2 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/90c1ee844dfb4fccadf971c63ec6daa2 2024-12-10T15:37:55,072 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/6ae0843f0c9740cd9fbe18655e2a169b to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/6ae0843f0c9740cd9fbe18655e2a169b 2024-12-10T15:37:55,072 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/12f0edb366034fa4ae005dfc4f58a7bb to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/12f0edb366034fa4ae005dfc4f58a7bb 2024-12-10T15:37:55,072 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/e705c0834bd4407ea0fb056cb1ae402a to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/e705c0834bd4407ea0fb056cb1ae402a 2024-12-10T15:37:55,072 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/63a6ba9588414943a251bc969baaadde to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/63a6ba9588414943a251bc969baaadde 2024-12-10T15:37:55,072 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/7cd505af8c0f472b8daf0252fc614324 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/7cd505af8c0f472b8daf0252fc614324 2024-12-10T15:37:55,073 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/30a45ebd830b46c399a5d919b10dbf92 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/30a45ebd830b46c399a5d919b10dbf92 2024-12-10T15:37:55,073 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/618bf03de2d343a6b830ed119e01f172 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/618bf03de2d343a6b830ed119e01f172 2024-12-10T15:37:55,181 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/bf0937f134364a2698b5b9330be28e95 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/bf0937f134364a2698b5b9330be28e95 2024-12-10T15:37:55,181 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/fe812ad9c6c94fe2a11d163576c2fd2c to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/fe812ad9c6c94fe2a11d163576c2fd2c 2024-12-10T15:37:55,181 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/e4ee3fdfe22e47499d9a1c7cc2546281 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/e4ee3fdfe22e47499d9a1c7cc2546281 2024-12-10T15:37:55,181 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/44ae908830524e5e915dd2c422cc6f60 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/44ae908830524e5e915dd2c422cc6f60 2024-12-10T15:37:55,181 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/3b7d9001df564b1fb7ca657dcf29db46 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/3b7d9001df564b1fb7ca657dcf29db46 2024-12-10T15:37:55,181 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/5b8606eb6fec4369a2903b79a6b1fe03 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/5b8606eb6fec4369a2903b79a6b1fe03 2024-12-10T15:37:55,320 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/6d39fe95d2ac472e898025293979f679 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/6d39fe95d2ac472e898025293979f679 2024-12-10T15:37:55,320 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/0c509aef24464142ae57711a67904af8 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/0c509aef24464142ae57711a67904af8 2024-12-10T15:37:55,321 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/a46656a2003d4e46acd17adde7c174e5 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/a46656a2003d4e46acd17adde7c174e5 2024-12-10T15:37:55,321 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/d42e95e3b00d4589894c526c7f6afcb5 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/d42e95e3b00d4589894c526c7f6afcb5 2024-12-10T15:37:55,321 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/b156f85167a449c2b1a2fcf4f742f0eb to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/b156f85167a449c2b1a2fcf4f742f0eb 2024-12-10T15:37:55,321 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/db3744b37db442c5888123af2c79d182 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/db3744b37db442c5888123af2c79d182 2024-12-10T15:37:55,321 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/5fabf8c6d0054833b35200b07498922c to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/5fabf8c6d0054833b35200b07498922c 2024-12-10T15:37:55,322 DEBUG [StoreCloser-TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/be88d19f48c3438a8ec3de17ea8efc33, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/7bc34745f8084a72bdbdf8db2216af88, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/23fc5c1f31f34ae19d4ea9d418940999, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/3328861de8d04a1ea02fd54d54f37709, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/f3d9814c740e41519372eef1139334e7, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/52b12045c76c456c86e45dab17f5aec5, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/6eb23344bb3f4534a1fddb3c4a7f3511, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/6646b75cae8e417fb22bc4cca4d5caea, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/5f4fb62d3f534136b4df8b8748aedfd9, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/73508a06ed1c47af88f8d2da92543edc, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/1b27ef32c1f7405587a9e9c18dc016ec, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/ce1538b527b243f48265011d5a9da3af, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/d336cf34ce8e46509f1c5f383500fb37, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/0cebd1d4455b4553b5c553fce398dbc7, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/1b77dfc0f836456d9a4594fd03d81f39, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/6989225ab14945e8b711953bc996a138, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/611e26daddc347629c84107230d22776, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/30cba97737ad4f32a3cdcf902cf58c87, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/65f3495ae0ac42318e3b88c3645dad2d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/983060ace085499183638bd9e0f28ef3, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/e2979f5bb0734fbb93daaad4119de694, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/6c27e04835594f21ac3c136913f1c24a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/ccfc34aade5441c88c5eebe4e17050ae] to archive 2024-12-10T15:37:55,323 DEBUG [StoreCloser-TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-10T15:37:55,357 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/7bc34745f8084a72bdbdf8db2216af88 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/7bc34745f8084a72bdbdf8db2216af88 2024-12-10T15:37:55,357 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/3328861de8d04a1ea02fd54d54f37709 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/3328861de8d04a1ea02fd54d54f37709 2024-12-10T15:37:55,357 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/23fc5c1f31f34ae19d4ea9d418940999 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/23fc5c1f31f34ae19d4ea9d418940999 2024-12-10T15:37:55,357 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/f3d9814c740e41519372eef1139334e7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/f3d9814c740e41519372eef1139334e7 2024-12-10T15:37:55,357 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/be88d19f48c3438a8ec3de17ea8efc33 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/be88d19f48c3438a8ec3de17ea8efc33 2024-12-10T15:37:55,358 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/52b12045c76c456c86e45dab17f5aec5 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/52b12045c76c456c86e45dab17f5aec5 2024-12-10T15:37:55,358 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/6646b75cae8e417fb22bc4cca4d5caea to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/6646b75cae8e417fb22bc4cca4d5caea 2024-12-10T15:37:55,358 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/6eb23344bb3f4534a1fddb3c4a7f3511 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/6eb23344bb3f4534a1fddb3c4a7f3511 2024-12-10T15:37:55,398 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/5f4fb62d3f534136b4df8b8748aedfd9 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/5f4fb62d3f534136b4df8b8748aedfd9 2024-12-10T15:37:55,427 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/d336cf34ce8e46509f1c5f383500fb37 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/d336cf34ce8e46509f1c5f383500fb37 2024-12-10T15:37:55,427 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/6989225ab14945e8b711953bc996a138 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/6989225ab14945e8b711953bc996a138 2024-12-10T15:37:55,427 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/ce1538b527b243f48265011d5a9da3af to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/ce1538b527b243f48265011d5a9da3af 2024-12-10T15:37:55,427 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/0cebd1d4455b4553b5c553fce398dbc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/0cebd1d4455b4553b5c553fce398dbc7 2024-12-10T15:37:55,427 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/73508a06ed1c47af88f8d2da92543edc to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/73508a06ed1c47af88f8d2da92543edc 2024-12-10T15:37:55,427 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/1b27ef32c1f7405587a9e9c18dc016ec to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/1b27ef32c1f7405587a9e9c18dc016ec 2024-12-10T15:37:55,427 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/1b77dfc0f836456d9a4594fd03d81f39 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/1b77dfc0f836456d9a4594fd03d81f39 2024-12-10T15:37:55,429 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/611e26daddc347629c84107230d22776 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/611e26daddc347629c84107230d22776 2024-12-10T15:37:55,430 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/65f3495ae0ac42318e3b88c3645dad2d to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/65f3495ae0ac42318e3b88c3645dad2d 2024-12-10T15:37:55,430 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/ccfc34aade5441c88c5eebe4e17050ae to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/ccfc34aade5441c88c5eebe4e17050ae 2024-12-10T15:37:55,430 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/30cba97737ad4f32a3cdcf902cf58c87 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/30cba97737ad4f32a3cdcf902cf58c87 2024-12-10T15:37:55,430 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/6c27e04835594f21ac3c136913f1c24a to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/6c27e04835594f21ac3c136913f1c24a 2024-12-10T15:37:55,534 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/e2979f5bb0734fbb93daaad4119de694 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/e2979f5bb0734fbb93daaad4119de694 2024-12-10T15:37:55,534 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/983060ace085499183638bd9e0f28ef3 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/983060ace085499183638bd9e0f28ef3 2024-12-10T15:37:55,535 DEBUG [StoreCloser-TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/a476be899d9c4eb78bf568cc0a129dd4, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/ca14d95568c043dfb1bf282a8687be91, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/2dde417be2eb40e9b8e99f1e29782934, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/e14913ec1a2a440ab472fbdf4f506401, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/6884c9c40d7e4e12b3662e855a27a98d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/56e2aaca8451416c86832526eea2ddc4, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/2525341f90b74aa28e9d24738e09bc87, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/84d9484d1d4f4161af17c388968881a9, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/67e9f9bbee8949bc926b6161301c0035, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/d845e482be5e44f88dcf063c7591030b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/ebe10dd97a4e4bf88428cc39a3f927ed, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/88e45d8bbf9047a78c1faaf05499abbd, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/4975974af80d4103903a32066c6511db, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/dcbc186b94ab45bd87743aeacdf74b66, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/7feddf49b9494b5e97f38116acc786b0, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/e95e5a62bfc246168ef2922e772d73f3, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/efd201e10f3a46d8b27e3e6be4573496, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/cb300d199d9b49ab8b95c54213f9f69c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/0e8ebdb5590e4883bdb217ea582d8254, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/1228bddb665b422ab62d5abf306caa60, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/f02d002975e848dd9fb798ffd1c79163, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/959d542454f748a898744cc460e34b29, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/5ec7fcd8c50e42c7b48afbbb47278458] to archive 2024-12-10T15:37:55,536 DEBUG [StoreCloser-TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
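The archiver entries above all follow the same path convention: a compacted store file under data/<namespace>/<table>/<region>/<family>/ is moved to the identical relative path under archive/, keeping the cluster root prefix. The following is a minimal illustrative sketch of that convention only, using plain string handling with a path taken from the log; it is not HBase's internal HFileArchiver code.

```java
// Illustrative sketch of the archive-path convention visible in the log:
//   <root>/data/<ns>/<table>/<region>/<family>/<hfile>
//     -> <root>/archive/data/<ns>/<table>/<region>/<family>/<hfile>
// Plain string handling for illustration only; not HBase's HFileArchiver implementation.
public class ArchivePathSketch {
    static String toArchivePath(String storeFilePath) {
        int idx = storeFilePath.indexOf("/data/");
        if (idx < 0) {
            throw new IllegalArgumentException("not under a /data/ directory: " + storeFilePath);
        }
        // Insert "archive/" in front of the "data/..." suffix, keeping the root prefix intact.
        return storeFilePath.substring(0, idx + 1) + "archive" + storeFilePath.substring(idx);
    }

    public static void main(String[] args) {
        String src = "hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935"
            + "/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/a476be899d9c4eb78bf568cc0a129dd4";
        // Prints the matching archive/ location, as reported by backup.HFileArchiver above.
        System.out.println(toArchivePath(src));
    }
}
```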
2024-12-10T15:37:55,537 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/84d9484d1d4f4161af17c388968881a9 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/84d9484d1d4f4161af17c388968881a9 2024-12-10T15:37:55,537 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/ca14d95568c043dfb1bf282a8687be91 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/ca14d95568c043dfb1bf282a8687be91 2024-12-10T15:37:55,537 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/6884c9c40d7e4e12b3662e855a27a98d to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/6884c9c40d7e4e12b3662e855a27a98d 2024-12-10T15:37:55,538 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/56e2aaca8451416c86832526eea2ddc4 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/56e2aaca8451416c86832526eea2ddc4 2024-12-10T15:37:55,538 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/e14913ec1a2a440ab472fbdf4f506401 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/e14913ec1a2a440ab472fbdf4f506401 2024-12-10T15:37:55,584 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/2525341f90b74aa28e9d24738e09bc87 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/2525341f90b74aa28e9d24738e09bc87 2024-12-10T15:37:55,584 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/2dde417be2eb40e9b8e99f1e29782934 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/2dde417be2eb40e9b8e99f1e29782934 2024-12-10T15:37:55,585 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/a476be899d9c4eb78bf568cc0a129dd4 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/a476be899d9c4eb78bf568cc0a129dd4 2024-12-10T15:37:55,585 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/88e45d8bbf9047a78c1faaf05499abbd to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/88e45d8bbf9047a78c1faaf05499abbd 2024-12-10T15:37:55,585 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/4975974af80d4103903a32066c6511db to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/4975974af80d4103903a32066c6511db 2024-12-10T15:37:55,585 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/ebe10dd97a4e4bf88428cc39a3f927ed to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/ebe10dd97a4e4bf88428cc39a3f927ed 2024-12-10T15:37:55,585 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/67e9f9bbee8949bc926b6161301c0035 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/67e9f9bbee8949bc926b6161301c0035 2024-12-10T15:37:55,585 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/d845e482be5e44f88dcf063c7591030b to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/d845e482be5e44f88dcf063c7591030b 2024-12-10T15:37:55,586 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/7feddf49b9494b5e97f38116acc786b0 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/7feddf49b9494b5e97f38116acc786b0 2024-12-10T15:37:55,586 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/e95e5a62bfc246168ef2922e772d73f3 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/e95e5a62bfc246168ef2922e772d73f3 2024-12-10T15:37:55,586 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/cb300d199d9b49ab8b95c54213f9f69c to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/cb300d199d9b49ab8b95c54213f9f69c 2024-12-10T15:37:55,586 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/efd201e10f3a46d8b27e3e6be4573496 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/efd201e10f3a46d8b27e3e6be4573496 2024-12-10T15:37:55,586 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/dcbc186b94ab45bd87743aeacdf74b66 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/dcbc186b94ab45bd87743aeacdf74b66 2024-12-10T15:37:55,586 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/1228bddb665b422ab62d5abf306caa60 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/1228bddb665b422ab62d5abf306caa60 2024-12-10T15:37:55,586 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/0e8ebdb5590e4883bdb217ea582d8254 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/0e8ebdb5590e4883bdb217ea582d8254 2024-12-10T15:37:55,586 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/f02d002975e848dd9fb798ffd1c79163 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/f02d002975e848dd9fb798ffd1c79163 2024-12-10T15:37:55,587 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/959d542454f748a898744cc460e34b29 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/959d542454f748a898744cc460e34b29 2024-12-10T15:37:55,587 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/5ec7fcd8c50e42c7b48afbbb47278458 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/5ec7fcd8c50e42c7b48afbbb47278458 2024-12-10T15:37:55,590 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/recovered.edits/375.seqid, newMaxSeqId=375, maxSeqId=4 2024-12-10T15:37:55,591 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7. 2024-12-10T15:37:55,591 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] regionserver.HRegion(1635): Region close journal for cd3195b888bdde70b3568541344b4bc7: 2024-12-10T15:37:55,632 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=117}] handler.UnassignRegionHandler(170): Closed cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:37:55,634 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=cd3195b888bdde70b3568541344b4bc7, regionState=CLOSED 2024-12-10T15:37:55,707 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-12-10T15:37:55,707 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; CloseRegionProcedure cd3195b888bdde70b3568541344b4bc7, server=bf0fec90ff6d,46239,1733844953049 in 8.8140 sec 2024-12-10T15:37:55,708 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=116, resume processing ppid=115 2024-12-10T15:37:55,708 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=cd3195b888bdde70b3568541344b4bc7, UNASSIGN in 8.8180 sec 2024-12-10T15:37:55,709 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-12-10T15:37:55,709 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 8.8200 sec 2024-12-10T15:37:55,710 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733845075709"}]},"ts":"1733845075709"} 2024-12-10T15:37:55,710 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-10T15:37:55,919 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set 
TestAcidGuarantees to state=DISABLED 2024-12-10T15:37:55,920 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 9.7150 sec 2024-12-10T15:38:01,116 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-10T15:38:02,416 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-10T15:38:04,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-10T15:38:04,321 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-12-10T15:38:04,321 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-10T15:38:04,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:38:04,322 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=118, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:38:04,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-10T15:38:04,323 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=118, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:38:04,325 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:38:04,326 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A, FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B, FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C, FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/recovered.edits] 2024-12-10T15:38:04,328 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/54d37547fdf14be28b3df3707acd6c7c to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/54d37547fdf14be28b3df3707acd6c7c 2024-12-10T15:38:04,328 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/33483552fa6943fdb3508a47ca2f32dc to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/33483552fa6943fdb3508a47ca2f32dc 2024-12-10T15:38:04,328 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/8c26c3d078324f699e696ea28024c8a3 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/A/8c26c3d078324f699e696ea28024c8a3 2024-12-10T15:38:04,365 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/71f690835bef4cc7a0c156ed63abdc75 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/71f690835bef4cc7a0c156ed63abdc75 2024-12-10T15:38:04,365 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/3a9fc3fe5e4c46b8abbc621a999b6a30 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/3a9fc3fe5e4c46b8abbc621a999b6a30 2024-12-10T15:38:04,365 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/779695705ff04bfea4c78d8017ae545b to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/B/779695705ff04bfea4c78d8017ae545b 2024-12-10T15:38:04,367 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/903f69735f52421da2ff3d1d80e51b70 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/903f69735f52421da2ff3d1d80e51b70 2024-12-10T15:38:04,367 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/b72be756dbca463e844e475b70b59683 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/b72be756dbca463e844e475b70b59683 2024-12-10T15:38:04,367 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/dc3a5b6cf55d491dbf257359d592b2b6 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/C/dc3a5b6cf55d491dbf257359d592b2b6 2024-12-10T15:38:04,386 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/recovered.edits/375.seqid to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7/recovered.edits/375.seqid 2024-12-10T15:38:04,387 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:38:04,387 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-10T15:38:04,387 DEBUG [PEWorker-5 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-10T15:38:04,388 DEBUG [PEWorker-5 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-10T15:38:04,392 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121005b0ece4fd9f478f8449cec934f9cf4b_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121005b0ece4fd9f478f8449cec934f9cf4b_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:38:04,393 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210055e1ec3ed9e488ebb749476510a0e0d_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210055e1ec3ed9e488ebb749476510a0e0d_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:38:04,393 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412101b869337e8034224a582bbc237eac74b_cd3195b888bdde70b3568541344b4bc7 to 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412101b869337e8034224a582bbc237eac74b_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:38:04,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-10T15:38:04,495 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121068a1b3791bee4c0b94ac5efe81010377_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121068a1b3791bee4c0b94ac5efe81010377_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:38:04,496 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210a1ea6014df4940778cbf8050b5a0484d_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210a1ea6014df4940778cbf8050b5a0484d_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:38:04,496 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121088b54f50bfda46d483c2bba751046a8a_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121088b54f50bfda46d483c2bba751046a8a_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:38:04,496 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210997c49e7e0c54e27b085f6c10ba24fb1_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210997c49e7e0c54e27b085f6c10ba24fb1_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:38:04,496 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210acb134ca6158453ea2f706dabebe1df1_cd3195b888bdde70b3568541344b4bc7 to 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210acb134ca6158453ea2f706dabebe1df1_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:38:04,496 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210b81571df78264a70a13a448c42ebefc1_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210b81571df78264a70a13a448c42ebefc1_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:38:04,522 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210c799c5dfddef453689a0c2dcb939339c_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210c799c5dfddef453689a0c2dcb939339c_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:38:04,522 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210caf0ea1c3b984739973cded055c88d52_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210caf0ea1c3b984739973cded055c88d52_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:38:04,522 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210d1d11f7fb42b44849b344a83f630a242_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210d1d11f7fb42b44849b344a83f630a242_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:38:04,522 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210dceb3d8d19924363b1af98155f16c74f_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210dceb3d8d19924363b1af98155f16c74f_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:38:04,554 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from 
FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210d661369fcbe640e1bd90e547ffce7ae0_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210d661369fcbe640e1bd90e547ffce7ae0_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:38:04,554 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210db006e7255b0411f8411209355e4207b_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210db006e7255b0411f8411209355e4207b_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:38:04,554 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210e38b3e433cb54481b62ba22e4292621c_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210e38b3e433cb54481b62ba22e4292621c_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:38:04,554 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210e84b0f30248c4e26984b36933ef9d15d_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210e84b0f30248c4e26984b36933ef9d15d_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:38:04,554 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210f90b9dcd9d024e5f920819b8bf224051_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210f90b9dcd9d024e5f920819b8bf224051_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:38:04,554 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210eb7dc945e8d242f7b1a8e05c69e9159d_cd3195b888bdde70b3568541344b4bc7 to 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210eb7dc945e8d242f7b1a8e05c69e9159d_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:38:04,554 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210fdfae6e8a05044d78859ab2cc773f91b_cd3195b888bdde70b3568541344b4bc7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210fdfae6e8a05044d78859ab2cc773f91b_cd3195b888bdde70b3568541344b4bc7 2024-12-10T15:38:04,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-10T15:38:04,649 DEBUG [PEWorker-5 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-10T15:38:04,651 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=118, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:38:04,652 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-10T15:38:04,653 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-10T15:38:04,654 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=118, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:38:04,654 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-10T15:38:04,654 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733845084654"}]},"ts":"9223372036854775807"} 2024-12-10T15:38:04,655 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-10T15:38:04,655 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => cd3195b888bdde70b3568541344b4bc7, NAME => 'TestAcidGuarantees,,1733845041411.cd3195b888bdde70b3568541344b4bc7.', STARTKEY => '', ENDKEY => ''}] 2024-12-10T15:38:04,655 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 
2024-12-10T15:38:04,655 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733845084655"}]},"ts":"9223372036854775807"} 2024-12-10T15:38:04,656 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-10T15:38:04,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-10T15:38:05,044 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=118, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:38:05,045 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 723 msec 2024-12-10T15:38:05,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-10T15:38:05,425 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-12-10T15:38:05,433 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=243 (was 245), OpenFileDescriptor=438 (was 457), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1224 (was 1076) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=264 (was 1899) 2024-12-10T15:38:05,439 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=243, OpenFileDescriptor=438, MaxFileDescriptor=1048576, SystemLoadAverage=1224, ProcessCount=11, AvailableMemoryMB=263 2024-12-10T15:38:05,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
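The DISABLE (procId 114) and DELETE (procId 118) operations completed above are driven from the test through the standard client Admin interface, after which the master runs DisableTableProcedure and DeleteTableProcedure as seen in the PEWorker entries. A minimal client-side sketch of that sequence, assuming an already configured connection (the connection settings are not part of this log), might look like:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal client-side sketch of the DISABLE + DELETE sequence recorded above.
// Connection settings are assumed; the test harness wires these up itself.
public class DropTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            if (admin.isTableEnabled(table)) {
                admin.disableTable(table);   // master runs DisableTableProcedure (pid=114 in the log)
            }
            admin.deleteTable(table);        // master runs DeleteTableProcedure (pid=118 in the log)
        }
    }
}
```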
2024-12-10T15:38:05,441 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T15:38:05,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=119, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-10T15:38:05,442 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=119, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T15:38:05,442 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:05,442 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 119 2024-12-10T15:38:05,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=119 2024-12-10T15:38:05,442 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=119, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T15:38:05,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742311_1487 (size=963) 2024-12-10T15:38:05,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=119 2024-12-10T15:38:05,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=119 2024-12-10T15:38:05,848 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935 2024-12-10T15:38:05,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742312_1488 (size=53) 2024-12-10T15:38:06,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=119 2024-12-10T15:38:06,252 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T15:38:06,252 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 7b74038778882593ac40a176deaf1ba7, disabling compactions & flushes 2024-12-10T15:38:06,252 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:06,252 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:06,252 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. after waiting 0 ms 2024-12-10T15:38:06,252 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:06,252 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
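
The create request logged above (HMaster$4 at 15:38:05,441) corresponds roughly to the following client-side sketch: a table with families A, B and C, one version each, and the table-level metadata key hbase.hregion.compacting.memstore.type set to ADAPTIVE. This is an illustrative reconstruction against the HBase 2.x Admin API, not the test's actual code; the class name is made up.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAdaptiveTable {  // illustrative name, not from the test source
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // Table-level metadata seen in the logged create request: selects the
              // ADAPTIVE in-memory compaction policy for the CompactingMemStore.
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
          for (String cf : new String[] { "A", "B", "C" }) {
            table.setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes(cf))
                .setMaxVersions(1)  // VERSIONS => '1' in the logged descriptor
                .build());
          }
          admin.createTable(table.build());
        }
      }
    }

The remaining column-family attributes in the log (BLOOMFILTER => 'ROW', BLOCKSIZE => 65536, TTL => FOREVER, and so on) are the builder defaults, which is why the sketch does not set them explicitly.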
2024-12-10T15:38:06,252 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:06,253 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=119, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T15:38:06,253 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733845086253"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733845086253"}]},"ts":"1733845086253"} 2024-12-10T15:38:06,253 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-10T15:38:06,254 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=119, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T15:38:06,254 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733845086254"}]},"ts":"1733845086254"} 2024-12-10T15:38:06,254 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-10T15:38:06,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=119 2024-12-10T15:38:06,805 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b74038778882593ac40a176deaf1ba7, ASSIGN}] 2024-12-10T15:38:06,806 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=120, ppid=119, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b74038778882593ac40a176deaf1ba7, ASSIGN 2024-12-10T15:38:06,807 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=120, ppid=119, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b74038778882593ac40a176deaf1ba7, ASSIGN; state=OFFLINE, location=bf0fec90ff6d,46239,1733844953049; forceNewPlan=false, retain=false 2024-12-10T15:38:06,957 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=7b74038778882593ac40a176deaf1ba7, regionState=OPENING, regionLocation=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:06,958 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; OpenRegionProcedure 7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049}] 2024-12-10T15:38:07,044 ERROR [ProcedureDispatcherTimeoutThread {}] procedure2.RemoteProcedureDispatcher$TimeoutExecutorThread(331): DelayQueue for RemoteProcedureDispatcher is not empty when timed waiting elapsed. If this is repeated consistently, it means no element is getting expired from the queue and it might freeze the system. 
Queue: [containedObject=bf0fec90ff6d,46239,1733844953049, timeout=1733845087109, delay=65, operations=[pid=121, ppid=120, state=RUNNABLE; OpenRegionProcedure 7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049]] 2024-12-10T15:38:07,110 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:07,112 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:07,112 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7285): Opening region: {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} 2024-12-10T15:38:07,113 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:07,113 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T15:38:07,113 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7327): checking encryption for 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:07,113 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7330): checking classloading for 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:07,114 INFO [StoreOpener-7b74038778882593ac40a176deaf1ba7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:07,114 INFO [StoreOpener-7b74038778882593ac40a176deaf1ba7-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T15:38:07,114 INFO [StoreOpener-7b74038778882593ac40a176deaf1ba7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7b74038778882593ac40a176deaf1ba7 columnFamilyName A 2024-12-10T15:38:07,114 DEBUG [StoreOpener-7b74038778882593ac40a176deaf1ba7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:07,115 INFO [StoreOpener-7b74038778882593ac40a176deaf1ba7-1 {}] regionserver.HStore(327): Store=7b74038778882593ac40a176deaf1ba7/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T15:38:07,115 INFO [StoreOpener-7b74038778882593ac40a176deaf1ba7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:07,115 INFO [StoreOpener-7b74038778882593ac40a176deaf1ba7-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T15:38:07,115 INFO [StoreOpener-7b74038778882593ac40a176deaf1ba7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7b74038778882593ac40a176deaf1ba7 columnFamilyName B 2024-12-10T15:38:07,115 DEBUG [StoreOpener-7b74038778882593ac40a176deaf1ba7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:07,116 INFO [StoreOpener-7b74038778882593ac40a176deaf1ba7-1 {}] regionserver.HStore(327): Store=7b74038778882593ac40a176deaf1ba7/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T15:38:07,116 INFO [StoreOpener-7b74038778882593ac40a176deaf1ba7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:07,116 INFO [StoreOpener-7b74038778882593ac40a176deaf1ba7-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T15:38:07,116 INFO [StoreOpener-7b74038778882593ac40a176deaf1ba7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7b74038778882593ac40a176deaf1ba7 columnFamilyName C 2024-12-10T15:38:07,116 DEBUG [StoreOpener-7b74038778882593ac40a176deaf1ba7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:07,117 INFO [StoreOpener-7b74038778882593ac40a176deaf1ba7-1 {}] regionserver.HStore(327): Store=7b74038778882593ac40a176deaf1ba7/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T15:38:07,117 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:07,117 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:07,117 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:07,118 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
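
The FlushLargeStoresPolicy entry above falls back to memStoreFlushHeapSize divided by the number of families (16.0 M) because hbase.hregion.percolumnfamilyflush.size.lower.bound is not present in the table descriptor. A hedged sketch of pinning that lower bound explicitly in the descriptor; the 16 MB value mirrors the derived figure in the log and the class name is illustrative.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class PerFamilyFlushBound {  // illustrative name, not from the test source
      public static void main(String[] args) {
        // Pin the per-family flush lower bound (16 MB here) in the table descriptor
        // instead of relying on the memStoreFlushHeapSize / numFamilies fallback
        // reported in the log entry above.
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16L * 1024 * 1024))
            .build();
        System.out.println(
            desc.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
      }
    }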
2024-12-10T15:38:07,119 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1085): writing seq id for 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:07,120 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T15:38:07,120 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1102): Opened 7b74038778882593ac40a176deaf1ba7; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66605720, jitterRate=-0.007497429847717285}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T15:38:07,121 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1001): Region open journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:07,121 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., pid=121, masterSystemTime=1733845087109 2024-12-10T15:38:07,122 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:07,122 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
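
With the region now opened (next sequenceid=2), the test's writers start issuing Puts against row keys like test_row_0; the entries further below show those writes being rejected with RegionTooBusyException while the memstore is over its 512 KB blocking limit. A simplified client-side sketch of such a write with an explicit backoff-and-retry follows. It is illustrative only: the stock HBase client already retries internally, and in practice the exception may surface wrapped in a retries-exhausted error rather than directly as shown here.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteWithBackoff {  // illustrative name, not from the test source
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Row and qualifier mirror the flushed cell key in the log (test_row_0/A:col10).
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // Back off and retry when the region rejects the write because its memstore
          // is over the blocking limit (the RegionTooBusyException seen below).
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);
              break;
            } catch (RegionTooBusyException e) {
              if (attempt >= 5) throw e;
              Thread.sleep(100L * attempt);
            }
          }
        }
      }
    }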
2024-12-10T15:38:07,122 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=7b74038778882593ac40a176deaf1ba7, regionState=OPEN, openSeqNum=2, regionLocation=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:07,123 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-12-10T15:38:07,124 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; OpenRegionProcedure 7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 in 165 msec 2024-12-10T15:38:07,124 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=120, resume processing ppid=119 2024-12-10T15:38:07,124 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, ppid=119, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b74038778882593ac40a176deaf1ba7, ASSIGN in 319 msec 2024-12-10T15:38:07,125 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=119, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T15:38:07,125 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733845087125"}]},"ts":"1733845087125"} 2024-12-10T15:38:07,125 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-10T15:38:07,403 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=119, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T15:38:07,403 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.9620 sec 2024-12-10T15:38:07,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=119 2024-12-10T15:38:07,546 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 119 completed 2024-12-10T15:38:07,547 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7d0ab200 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@32bb71c 2024-12-10T15:38:07,738 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@de9f076, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:38:07,739 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:38:07,740 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43398, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:38:07,741 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-10T15:38:07,742 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55278, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-10T15:38:07,743 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5871c039 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6bc0f7c 2024-12-10T15:38:07,910 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4414259d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:38:07,911 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7daa5922 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1b8b6e04 2024-12-10T15:38:07,917 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-10T15:38:08,089 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ed69825, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:38:08,090 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b7f20c4 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5bc486e1 2024-12-10T15:38:08,289 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11193a0c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:38:08,289 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5f7c40ba to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2070263a 2024-12-10T15:38:08,424 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-10T15:38:08,491 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7861b162, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:38:08,491 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x41b0e7b6 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6050584c 2024-12-10T15:38:08,644 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@154f0f85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:38:08,645 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6dd48863 to 127.0.0.1:56346 with 
session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8a917b 2024-12-10T15:38:08,765 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3652e74d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:38:08,766 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x51196534 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@54c2725 2024-12-10T15:38:08,860 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2405c04e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:38:08,861 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1dc5e114 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79d49886 2024-12-10T15:38:08,962 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73d92042, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:38:08,963 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3e96b8ad to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@635b1751 2024-12-10T15:38:09,085 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@593af048, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:38:09,086 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x17e5a47d to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2cbfd84f 2024-12-10T15:38:09,190 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2209c520, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:38:09,192 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:38:09,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees 2024-12-10T15:38:09,193 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees 
execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:38:09,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-10T15:38:09,193 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:38:09,194 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:38:09,195 DEBUG [hconnection-0x5295c7fc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:38:09,195 DEBUG [hconnection-0x20204489-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:38:09,197 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43408, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:38:09,199 DEBUG [hconnection-0x239fc8f6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:38:09,200 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43414, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:38:09,200 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43420, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:38:09,200 DEBUG [hconnection-0x7da7a5e7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:38:09,200 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43432, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:38:09,203 DEBUG [hconnection-0x3784265c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:38:09,204 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43440, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:38:09,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:09,208 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b74038778882593ac40a176deaf1ba7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T15:38:09,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=A 2024-12-10T15:38:09,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:09,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=B 2024-12-10T15:38:09,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:09,208 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=C 2024-12-10T15:38:09,208 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:09,211 DEBUG [hconnection-0x65612578-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:38:09,211 DEBUG [hconnection-0x29816763-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:38:09,211 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43444, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:38:09,212 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43454, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:38:09,215 DEBUG [hconnection-0x631c4d38-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:38:09,216 DEBUG [hconnection-0x321b17f0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:38:09,216 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43458, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:38:09,217 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43472, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:38:09,220 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:09,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43420 deadline: 1733845149220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:09,221 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:09,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 3 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43458 deadline: 1733845149221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:09,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:09,222 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:09,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845149221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:09,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845149221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:09,228 DEBUG [hconnection-0x6239f3a4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:38:09,229 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43478, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:38:09,230 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:09,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845149230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:09,293 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/98a7f466bb6b4c999d21f45091fc2139 is 50, key is test_row_0/A:col10/1733845089206/Put/seqid=0 2024-12-10T15:38:09,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-10T15:38:09,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742313_1489 (size=19021) 2024-12-10T15:38:09,324 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:09,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43420 deadline: 1733845149322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:09,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:09,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845149323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:09,326 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:09,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43458 deadline: 1733845149323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:09,326 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:09,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845149323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:09,331 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:09,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845149331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:09,345 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:09,345 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-10T15:38:09,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:09,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:09,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:09,346 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:09,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:09,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:38:09,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122
2024-12-10T15:38:09,497 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049
2024-12-10T15:38:09,497 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123
2024-12-10T15:38:09,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.
2024-12-10T15:38:09,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing
2024-12-10T15:38:09,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.
2024-12-10T15:38:09,498 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123
java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:38:09,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123
java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:38:09,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=123
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:38:09,528 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T15:38:09,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43420 deadline: 1733845149525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049
2024-12-10T15:38:09,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T15:38:09,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845149527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049
2024-12-10T15:38:09,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T15:38:09,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43458 deadline: 1733845149527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049
2024-12-10T15:38:09,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T15:38:09,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845149528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049
2024-12-10T15:38:09,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T15:38:09,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845149533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049
2024-12-10T15:38:09,649 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049
2024-12-10T15:38:09,650 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123
2024-12-10T15:38:09,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.
2024-12-10T15:38:09,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing
2024-12-10T15:38:09,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.
2024-12-10T15:38:09,650 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123
java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:38:09,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123
java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:38:09,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=123
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:38:09,697 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/98a7f466bb6b4c999d21f45091fc2139
2024-12-10T15:38:09,725 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/7fc1659739c241d299ee71d2487de1ee is 50, key is test_row_0/B:col10/1733845089206/Put/seqid=0
2024-12-10T15:38:09,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742314_1490 (size=12001)
2024-12-10T15:38:09,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122
2024-12-10T15:38:09,803 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049
2024-12-10T15:38:09,803 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123
2024-12-10T15:38:09,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.
2024-12-10T15:38:09,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing
2024-12-10T15:38:09,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.
2024-12-10T15:38:09,804 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123
java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:38:09,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123
java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:38:09,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=123
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:38:09,833 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T15:38:09,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845149829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049
2024-12-10T15:38:09,833 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T15:38:09,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43420 deadline: 1733845149830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049
2024-12-10T15:38:09,834 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T15:38:09,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43458 deadline: 1733845149831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049
2024-12-10T15:38:09,834 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T15:38:09,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845149832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049
2024-12-10T15:38:09,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T15:38:09,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845149838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049
2024-12-10T15:38:09,956 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049
2024-12-10T15:38:09,956 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123
2024-12-10T15:38:09,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.
2024-12-10T15:38:09,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing
2024-12-10T15:38:09,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.
2024-12-10T15:38:09,957 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123
java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:38:09,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123
java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:38:09,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=123
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:38:10,110 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049
2024-12-10T15:38:10,110 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123
2024-12-10T15:38:10,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.
2024-12-10T15:38:10,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing
2024-12-10T15:38:10,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.
2024-12-10T15:38:10,110 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123
java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:38:10,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123
java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:38:10,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=123
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:38:10,170 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/7fc1659739c241d299ee71d2487de1ee
2024-12-10T15:38:10,198 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/51249cb9ab7c404890723366d2e50495 is 50, key is test_row_0/C:col10/1733845089206/Put/seqid=0
2024-12-10T15:38:10,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742315_1491 (size=12001)
2024-12-10T15:38:10,262 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049
2024-12-10T15:38:10,262 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123
2024-12-10T15:38:10,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.
2024-12-10T15:38:10,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing
2024-12-10T15:38:10,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.
2024-12-10T15:38:10,263 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123
java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:38:10,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123
java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-10T15:38:10,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=123
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:10,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-10T15:38:10,336 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:10,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43458 deadline: 1733845150335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:10,337 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:10,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845150335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:10,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:10,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43420 deadline: 1733845150336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:10,340 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:10,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845150337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:10,345 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:10,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845150342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:10,414 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:10,415 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-10T15:38:10,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:10,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:10,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:10,415 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:10,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:10,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:10,568 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:10,568 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-10T15:38:10,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:10,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:10,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:10,568 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:10,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:10,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:10,623 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/51249cb9ab7c404890723366d2e50495 2024-12-10T15:38:10,630 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/98a7f466bb6b4c999d21f45091fc2139 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/98a7f466bb6b4c999d21f45091fc2139 2024-12-10T15:38:10,634 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/98a7f466bb6b4c999d21f45091fc2139, entries=300, sequenceid=13, filesize=18.6 K 2024-12-10T15:38:10,639 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/7fc1659739c241d299ee71d2487de1ee as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/7fc1659739c241d299ee71d2487de1ee 2024-12-10T15:38:10,643 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/7fc1659739c241d299ee71d2487de1ee, entries=150, sequenceid=13, 
filesize=11.7 K 2024-12-10T15:38:10,644 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/51249cb9ab7c404890723366d2e50495 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/51249cb9ab7c404890723366d2e50495 2024-12-10T15:38:10,719 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/51249cb9ab7c404890723366d2e50495, entries=150, sequenceid=13, filesize=11.7 K 2024-12-10T15:38:10,720 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 7b74038778882593ac40a176deaf1ba7 in 1512ms, sequenceid=13, compaction requested=false 2024-12-10T15:38:10,720 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:10,721 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:10,722 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-10T15:38:10,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
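The flushes above are draining writes of the shape visible in the flush output: row test_row_0, qualifier col10, one cell per column family A, B and C. A minimal client-side sketch of such a write is shown here for reference; the table, row, family and qualifier names are taken from the log, while the connection setup and the cell values are illustrative assumptions rather than anything recorded in this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiFamilyPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // One Put spanning the three column families seen in the flush output (A, B, C),
      // so the row is updated atomically across families.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value-A")); // values are illustrative, not from the log
      put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), Bytes.toBytes("value-B"));
      put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), Bytes.toBytes("value-C"));
      table.put(put);
    }
  }
}

When such puts arrive faster than flushes can drain them, the region crosses the 512 K memstore blocking limit reported above and RegionTooBusyException is returned to callers until the flush completes, which is the warning pattern repeated throughout this log.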
2024-12-10T15:38:10,722 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2837): Flushing 7b74038778882593ac40a176deaf1ba7 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-10T15:38:10,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=A 2024-12-10T15:38:10,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:10,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=B 2024-12-10T15:38:10,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:10,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=C 2024-12-10T15:38:10,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:10,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/27a59da5167f404b9bbcbf4490feeac7 is 50, key is test_row_0/A:col10/1733845089219/Put/seqid=0 2024-12-10T15:38:10,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742316_1492 (size=12001) 2024-12-10T15:38:10,749 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/27a59da5167f404b9bbcbf4490feeac7 2024-12-10T15:38:10,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/bb7a1069ceeb4d47978b0725604a3f67 is 50, key is test_row_0/B:col10/1733845089219/Put/seqid=0 2024-12-10T15:38:10,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742317_1493 (size=12001) 2024-12-10T15:38:10,780 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), 
to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/bb7a1069ceeb4d47978b0725604a3f67 2024-12-10T15:38:10,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/4acf0503856b40618d019f4042f76d23 is 50, key is test_row_0/C:col10/1733845089219/Put/seqid=0 2024-12-10T15:38:10,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742318_1494 (size=12001) 2024-12-10T15:38:10,825 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/4acf0503856b40618d019f4042f76d23 2024-12-10T15:38:10,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/27a59da5167f404b9bbcbf4490feeac7 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/27a59da5167f404b9bbcbf4490feeac7 2024-12-10T15:38:10,843 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/27a59da5167f404b9bbcbf4490feeac7, entries=150, sequenceid=38, filesize=11.7 K 2024-12-10T15:38:10,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/bb7a1069ceeb4d47978b0725604a3f67 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/bb7a1069ceeb4d47978b0725604a3f67 2024-12-10T15:38:10,849 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/bb7a1069ceeb4d47978b0725604a3f67, entries=150, sequenceid=38, filesize=11.7 K 2024-12-10T15:38:10,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/4acf0503856b40618d019f4042f76d23 as 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/4acf0503856b40618d019f4042f76d23 2024-12-10T15:38:10,860 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/4acf0503856b40618d019f4042f76d23, entries=150, sequenceid=38, filesize=11.7 K 2024-12-10T15:38:10,861 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=0 B/0 for 7b74038778882593ac40a176deaf1ba7 in 138ms, sequenceid=38, compaction requested=false 2024-12-10T15:38:10,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2538): Flush status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:10,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:10,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-10T15:38:10,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=123 2024-12-10T15:38:10,863 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-12-10T15:38:10,863 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6680 sec 2024-12-10T15:38:10,864 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees in 1.6710 sec 2024-12-10T15:38:11,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-10T15:38:11,296 INFO [Thread-2178 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-12-10T15:38:11,300 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:38:11,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees 2024-12-10T15:38:11,301 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:38:11,302 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, 
table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:38:11,302 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:38:11,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-10T15:38:11,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:11,352 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b74038778882593ac40a176deaf1ba7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T15:38:11,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=A 2024-12-10T15:38:11,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:11,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=B 2024-12-10T15:38:11,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:11,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=C 2024-12-10T15:38:11,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:11,381 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:11,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845151376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:11,381 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:11,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845151377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:11,385 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:11,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43420 deadline: 1733845151378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:11,385 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:11,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43458 deadline: 1733845151378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:11,385 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:11,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845151381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:11,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-10T15:38:11,444 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/2dc17561605f4de0be429157e53189e3 is 50, key is test_row_0/A:col10/1733845091347/Put/seqid=0 2024-12-10T15:38:11,453 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:11,454 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-10T15:38:11,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:11,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:11,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
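The FlushTableProcedure/FlushRegionProcedure pairs in this log (pid=122/123 above, pid=124/125 here) correspond to explicit client flush requests; the earlier "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 122 completed" line is the client side of that round trip. A minimal sketch of issuing such a request through the Admin API follows; only the table name comes from the log, and the connection setup is an assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a flush of the table; in this log the master runs it as a
      // FlushTableProcedure with a FlushRegionProcedure subprocedure per region
      // (a single region, 7b74038778882593ac40a176deaf1ba7, in this run).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}

As the repeated "NOT flushing ... as already flushing" entries show, the region-level callable fails while a memstore flush is already in progress and the master redispatches it; once the in-flight flush finishes, the retry succeeds and the procedure is marked SUCCESS, as happened for pid=123 above.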
2024-12-10T15:38:11,454 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:11,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:11,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:11,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742319_1495 (size=12001) 2024-12-10T15:38:11,477 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/2dc17561605f4de0be429157e53189e3 2024-12-10T15:38:11,484 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/98fe875f6f5345469c0e408022681d1d is 50, key is test_row_0/B:col10/1733845091347/Put/seqid=0 2024-12-10T15:38:11,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:11,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845151482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:11,488 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:11,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845151482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:11,489 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:11,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43420 deadline: 1733845151486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:11,489 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:11,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845151486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:11,489 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:11,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43458 deadline: 1733845151486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:11,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742320_1496 (size=12001) 2024-12-10T15:38:11,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-10T15:38:11,606 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:11,606 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-10T15:38:11,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:11,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:11,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
2024-12-10T15:38:11,606 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:11,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:11,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:11,693 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:11,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845151688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:11,693 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:11,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845151689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:11,694 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:11,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845151690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:11,699 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:11,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43420 deadline: 1733845151695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:11,699 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:11,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43458 deadline: 1733845151695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:11,758 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:11,758 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-10T15:38:11,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:11,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:11,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:11,758 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:11,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:11,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:11,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-10T15:38:11,906 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/98fe875f6f5345469c0e408022681d1d 2024-12-10T15:38:11,910 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:11,911 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-10T15:38:11,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:11,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:11,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
2024-12-10T15:38:11,911 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:11,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:11,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:11,914 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/d4f609e7d36043e5ab7e663596a0be43 is 50, key is test_row_0/C:col10/1733845091347/Put/seqid=0 2024-12-10T15:38:11,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742321_1497 (size=12001) 2024-12-10T15:38:11,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:11,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845151994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:11,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:11,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845151995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:11,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:11,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845151996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:12,006 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:12,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43420 deadline: 1733845152002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:12,006 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:12,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43458 deadline: 1733845152002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:12,063 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:12,066 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-10T15:38:12,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:12,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:12,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:12,066 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:12,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:12,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:12,224 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:12,224 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-10T15:38:12,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:12,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:12,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:12,224 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:12,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:12,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:12,329 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/d4f609e7d36043e5ab7e663596a0be43 2024-12-10T15:38:12,332 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/2dc17561605f4de0be429157e53189e3 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/2dc17561605f4de0be429157e53189e3 2024-12-10T15:38:12,342 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/2dc17561605f4de0be429157e53189e3, entries=150, sequenceid=49, filesize=11.7 K 2024-12-10T15:38:12,343 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/98fe875f6f5345469c0e408022681d1d as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/98fe875f6f5345469c0e408022681d1d 2024-12-10T15:38:12,346 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/98fe875f6f5345469c0e408022681d1d, entries=150, sequenceid=49, 
filesize=11.7 K 2024-12-10T15:38:12,347 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/d4f609e7d36043e5ab7e663596a0be43 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/d4f609e7d36043e5ab7e663596a0be43 2024-12-10T15:38:12,350 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/d4f609e7d36043e5ab7e663596a0be43, entries=150, sequenceid=49, filesize=11.7 K 2024-12-10T15:38:12,351 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 7b74038778882593ac40a176deaf1ba7 in 999ms, sequenceid=49, compaction requested=true 2024-12-10T15:38:12,351 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:12,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:38:12,351 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:12,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:12,351 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:12,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:38:12,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:12,352 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:38:12,352 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:12,352 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:12,352 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 43023 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:12,352 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 
7b74038778882593ac40a176deaf1ba7/B is initiating minor compaction (all files) 2024-12-10T15:38:12,352 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 7b74038778882593ac40a176deaf1ba7/A is initiating minor compaction (all files) 2024-12-10T15:38:12,352 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b74038778882593ac40a176deaf1ba7/B in TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:12,352 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b74038778882593ac40a176deaf1ba7/A in TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:12,352 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/7fc1659739c241d299ee71d2487de1ee, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/bb7a1069ceeb4d47978b0725604a3f67, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/98fe875f6f5345469c0e408022681d1d] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp, totalSize=35.2 K 2024-12-10T15:38:12,352 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/98a7f466bb6b4c999d21f45091fc2139, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/27a59da5167f404b9bbcbf4490feeac7, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/2dc17561605f4de0be429157e53189e3] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp, totalSize=42.0 K 2024-12-10T15:38:12,352 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 7fc1659739c241d299ee71d2487de1ee, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733845089206 2024-12-10T15:38:12,352 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 98a7f466bb6b4c999d21f45091fc2139, keycount=300, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733845089205 2024-12-10T15:38:12,353 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting bb7a1069ceeb4d47978b0725604a3f67, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733845089217 2024-12-10T15:38:12,353 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 27a59da5167f404b9bbcbf4490feeac7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733845089217 2024-12-10T15:38:12,353 DEBUG 
[RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 98fe875f6f5345469c0e408022681d1d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1733845091347 2024-12-10T15:38:12,353 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2dc17561605f4de0be429157e53189e3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1733845091347 2024-12-10T15:38:12,360 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b74038778882593ac40a176deaf1ba7#B#compaction#416 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:12,360 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/0bff59617a7b4ee8bda9f0cdd5fc9e83 is 50, key is test_row_0/B:col10/1733845091347/Put/seqid=0 2024-12-10T15:38:12,368 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b74038778882593ac40a176deaf1ba7#A#compaction#417 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:12,369 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/e8404abff83b40fda8d3032a89f0cb42 is 50, key is test_row_0/A:col10/1733845091347/Put/seqid=0 2024-12-10T15:38:12,376 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:12,376 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-10T15:38:12,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
2024-12-10T15:38:12,377 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2837): Flushing 7b74038778882593ac40a176deaf1ba7 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-10T15:38:12,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=A 2024-12-10T15:38:12,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:12,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=B 2024-12-10T15:38:12,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:12,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=C 2024-12-10T15:38:12,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:12,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-10T15:38:12,416 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-10T15:38:12,416 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-10T15:38:12,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/d8f5849447824e93b6419d600f3df6d1 is 50, key is test_row_0/A:col10/1733845091376/Put/seqid=0 2024-12-10T15:38:12,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742324_1500 (size=12001) 2024-12-10T15:38:12,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742323_1499 (size=12104) 2024-12-10T15:38:12,464 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/e8404abff83b40fda8d3032a89f0cb42 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/e8404abff83b40fda8d3032a89f0cb42 2024-12-10T15:38:12,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46053 is added to blk_1073742322_1498 (size=12104) 2024-12-10T15:38:12,468 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b74038778882593ac40a176deaf1ba7/A of 7b74038778882593ac40a176deaf1ba7 into e8404abff83b40fda8d3032a89f0cb42(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:38:12,468 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:12,468 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., storeName=7b74038778882593ac40a176deaf1ba7/A, priority=13, startTime=1733845092351; duration=0sec 2024-12-10T15:38:12,468 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:12,468 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:A 2024-12-10T15:38:12,468 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:12,469 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:12,469 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 7b74038778882593ac40a176deaf1ba7/C is initiating minor compaction (all files) 2024-12-10T15:38:12,469 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b74038778882593ac40a176deaf1ba7/C in TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
2024-12-10T15:38:12,469 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/51249cb9ab7c404890723366d2e50495, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/4acf0503856b40618d019f4042f76d23, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/d4f609e7d36043e5ab7e663596a0be43] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp, totalSize=35.2 K 2024-12-10T15:38:12,470 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/0bff59617a7b4ee8bda9f0cdd5fc9e83 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/0bff59617a7b4ee8bda9f0cdd5fc9e83 2024-12-10T15:38:12,470 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 51249cb9ab7c404890723366d2e50495, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733845089206 2024-12-10T15:38:12,470 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4acf0503856b40618d019f4042f76d23, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733845089217 2024-12-10T15:38:12,470 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting d4f609e7d36043e5ab7e663596a0be43, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1733845091347 2024-12-10T15:38:12,473 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b74038778882593ac40a176deaf1ba7/B of 7b74038778882593ac40a176deaf1ba7 into 0bff59617a7b4ee8bda9f0cdd5fc9e83(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:38:12,473 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:12,473 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., storeName=7b74038778882593ac40a176deaf1ba7/B, priority=13, startTime=1733845092351; duration=0sec 2024-12-10T15:38:12,473 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:12,473 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:B 2024-12-10T15:38:12,476 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b74038778882593ac40a176deaf1ba7#C#compaction#419 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:12,476 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/f30d0e58270d4b0f89f2b4405c6811b8 is 50, key is test_row_0/C:col10/1733845091347/Put/seqid=0 2024-12-10T15:38:12,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742325_1501 (size=12104) 2024-12-10T15:38:12,488 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/f30d0e58270d4b0f89f2b4405c6811b8 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/f30d0e58270d4b0f89f2b4405c6811b8 2024-12-10T15:38:12,492 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b74038778882593ac40a176deaf1ba7/C of 7b74038778882593ac40a176deaf1ba7 into f30d0e58270d4b0f89f2b4405c6811b8(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:38:12,492 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:12,492 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., storeName=7b74038778882593ac40a176deaf1ba7/C, priority=13, startTime=1733845092351; duration=0sec 2024-12-10T15:38:12,492 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:12,492 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:C 2024-12-10T15:38:12,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:12,505 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:12,510 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:12,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43458 deadline: 1733845152507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:12,511 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:12,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845152509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:12,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:12,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845152509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:12,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:12,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43420 deadline: 1733845152510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:12,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:12,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845152510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:12,616 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:12,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845152612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:12,616 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:12,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845152614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:12,617 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:12,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845152614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:12,820 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:12,820 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:12,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845152817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:12,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845152818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:12,826 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:12,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845152824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:12,861 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/d8f5849447824e93b6419d600f3df6d1 2024-12-10T15:38:12,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/fa8b09f47ff744769083fdacbef847c9 is 50, key is test_row_0/B:col10/1733845091376/Put/seqid=0 2024-12-10T15:38:12,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742326_1502 (size=12001) 2024-12-10T15:38:13,123 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:13,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845153121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:13,125 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:13,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845153122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:13,130 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:13,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845153129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:13,327 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/fa8b09f47ff744769083fdacbef847c9 2024-12-10T15:38:13,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/7f46083540d24255ae48587c9c2dedfa is 50, key is test_row_0/C:col10/1733845091376/Put/seqid=0 2024-12-10T15:38:13,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742327_1503 (size=12001) 2024-12-10T15:38:13,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-10T15:38:13,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:13,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43458 deadline: 1733845153510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:13,516 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:13,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43420 deadline: 1733845153515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:13,630 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:13,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845153627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:13,630 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:13,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845153627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:13,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:13,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845153631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:13,745 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/7f46083540d24255ae48587c9c2dedfa 2024-12-10T15:38:13,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/d8f5849447824e93b6419d600f3df6d1 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/d8f5849447824e93b6419d600f3df6d1 2024-12-10T15:38:13,752 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/d8f5849447824e93b6419d600f3df6d1, entries=150, sequenceid=74, filesize=11.7 K 2024-12-10T15:38:13,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/fa8b09f47ff744769083fdacbef847c9 as 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/fa8b09f47ff744769083fdacbef847c9 2024-12-10T15:38:13,755 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/fa8b09f47ff744769083fdacbef847c9, entries=150, sequenceid=74, filesize=11.7 K 2024-12-10T15:38:13,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/7f46083540d24255ae48587c9c2dedfa as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/7f46083540d24255ae48587c9c2dedfa 2024-12-10T15:38:13,762 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/7f46083540d24255ae48587c9c2dedfa, entries=150, sequenceid=74, filesize=11.7 K 2024-12-10T15:38:13,763 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 7b74038778882593ac40a176deaf1ba7 in 1386ms, sequenceid=74, compaction requested=false 2024-12-10T15:38:13,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2538): Flush status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:13,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
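The recurring RegionTooBusyException entries above come from HRegion.checkResources rejecting puts while the region's memstore sits above its blocking threshold. In HBase that threshold is the region flush size multiplied by hbase.hregion.memstore.block.multiplier, so the reported limit of 512.0 K would be consistent with a deliberately small test flush size (for example 128 K times the default multiplier of 4); the actual TestAcidGuarantees configuration is not visible in this log. A minimal sketch of that arithmetic, with assumed values:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed test-only settings; not taken from this log.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // 128 K flush size (assumption)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // default multiplier
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = flushSize * multiplier; // 524288 bytes = 512 K with the values above
        System.out.println("Puts are rejected with RegionTooBusyException above " + blockingLimit + " bytes");
      }
    }

Once a flush brings the memstore back under that limit, mutations on the region are accepted again.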
2024-12-10T15:38:13,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=125 2024-12-10T15:38:13,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=125 2024-12-10T15:38:13,766 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-12-10T15:38:13,766 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4620 sec 2024-12-10T15:38:13,767 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees in 2.4660 sec 2024-12-10T15:38:14,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:14,640 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b74038778882593ac40a176deaf1ba7 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-10T15:38:14,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=A 2024-12-10T15:38:14,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:14,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=B 2024-12-10T15:38:14,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:14,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=C 2024-12-10T15:38:14,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:14,669 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/dbc8f70405674514a08dcc2352a33a85 is 50, key is test_row_0/A:col10/1733845094639/Put/seqid=0 2024-12-10T15:38:14,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742328_1504 (size=12001) 2024-12-10T15:38:14,694 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/dbc8f70405674514a08dcc2352a33a85 2024-12-10T15:38:14,705 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/7b9c950e34e04d5fa5bb88976a5b634d is 50, key is test_row_0/B:col10/1733845094639/Put/seqid=0 2024-12-10T15:38:14,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to 
blk_1073742329_1505 (size=12001) 2024-12-10T15:38:14,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:14,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845154715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:14,727 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:14,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845154719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:14,728 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:14,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845154721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:14,751 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/7b9c950e34e04d5fa5bb88976a5b634d 2024-12-10T15:38:14,768 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/fd51544e17ba44bc95688200e4e55c82 is 50, key is test_row_0/C:col10/1733845094639/Put/seqid=0 2024-12-10T15:38:14,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742330_1506 (size=12001) 2024-12-10T15:38:14,803 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/fd51544e17ba44bc95688200e4e55c82 2024-12-10T15:38:14,810 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/dbc8f70405674514a08dcc2352a33a85 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/dbc8f70405674514a08dcc2352a33a85 2024-12-10T15:38:14,814 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/dbc8f70405674514a08dcc2352a33a85, entries=150, sequenceid=90, filesize=11.7 K 2024-12-10T15:38:14,815 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/7b9c950e34e04d5fa5bb88976a5b634d as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/7b9c950e34e04d5fa5bb88976a5b634d 2024-12-10T15:38:14,819 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/7b9c950e34e04d5fa5bb88976a5b634d, entries=150, sequenceid=90, filesize=11.7 K 2024-12-10T15:38:14,819 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/fd51544e17ba44bc95688200e4e55c82 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/fd51544e17ba44bc95688200e4e55c82 2024-12-10T15:38:14,824 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/fd51544e17ba44bc95688200e4e55c82, entries=150, sequenceid=90, filesize=11.7 K 2024-12-10T15:38:14,825 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 7b74038778882593ac40a176deaf1ba7 in 185ms, sequenceid=90, compaction requested=true 2024-12-10T15:38:14,825 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:14,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:38:14,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:14,825 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:14,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:38:14,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:14,825 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:14,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:38:14,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:14,827 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:14,827 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 
7b74038778882593ac40a176deaf1ba7/A is initiating minor compaction (all files) 2024-12-10T15:38:14,827 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:14,827 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b74038778882593ac40a176deaf1ba7/A in TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:14,827 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 7b74038778882593ac40a176deaf1ba7/B is initiating minor compaction (all files) 2024-12-10T15:38:14,827 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/e8404abff83b40fda8d3032a89f0cb42, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/d8f5849447824e93b6419d600f3df6d1, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/dbc8f70405674514a08dcc2352a33a85] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp, totalSize=35.3 K 2024-12-10T15:38:14,827 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b74038778882593ac40a176deaf1ba7/B in TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
2024-12-10T15:38:14,827 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/0bff59617a7b4ee8bda9f0cdd5fc9e83, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/fa8b09f47ff744769083fdacbef847c9, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/7b9c950e34e04d5fa5bb88976a5b634d] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp, totalSize=35.3 K 2024-12-10T15:38:14,827 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting e8404abff83b40fda8d3032a89f0cb42, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1733845091347 2024-12-10T15:38:14,828 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 0bff59617a7b4ee8bda9f0cdd5fc9e83, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1733845091347 2024-12-10T15:38:14,828 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8f5849447824e93b6419d600f3df6d1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733845091376 2024-12-10T15:38:14,828 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting fa8b09f47ff744769083fdacbef847c9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733845091376 2024-12-10T15:38:14,828 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting dbc8f70405674514a08dcc2352a33a85, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733845092509 2024-12-10T15:38:14,829 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b9c950e34e04d5fa5bb88976a5b634d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733845092509 2024-12-10T15:38:14,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:14,834 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b74038778882593ac40a176deaf1ba7 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-10T15:38:14,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=A 2024-12-10T15:38:14,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:14,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=B 2024-12-10T15:38:14,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:14,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=C 
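The "FLUSHING TO DISK" and "Swapping pipeline suffix" entries come from CompactingMemStore and its CompactionPipeline, which means the column families here are backed by in-memory compaction. A minimal sketch of declaring a family that way with the 2.x client API; the BASIC policy is an assumption, since the policy actually used by the test is not shown in this log:

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InMemoryCompactionDescriptor {
      public static TableDescriptor build() {
        // Family "A" backed by a CompactingMemStore (in-memory compaction enabled).
        ColumnFamilyDescriptor cfA = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
            .setInMemoryCompaction(MemoryCompactionPolicy.BASIC) // assumed policy
            .build();
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setColumnFamily(cfA)
            .build();
      }
    }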
2024-12-10T15:38:14,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:14,838 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b74038778882593ac40a176deaf1ba7#B#compaction#425 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:14,839 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/daec679b0dcf4eb28b3ac47bf480ad8f is 50, key is test_row_0/B:col10/1733845094639/Put/seqid=0 2024-12-10T15:38:14,841 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b74038778882593ac40a176deaf1ba7#A#compaction#426 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:14,841 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/c08b7f6fa7c34827a3ae34c4c7bbe3d9 is 50, key is test_row_0/A:col10/1733845094639/Put/seqid=0 2024-12-10T15:38:14,844 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/3559e476e2184ac782051a0b694eeb73 is 50, key is test_row_0/A:col10/1733845094706/Put/seqid=0 2024-12-10T15:38:14,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742331_1507 (size=12207) 2024-12-10T15:38:14,857 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:14,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845154848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:14,865 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:14,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845154858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:14,866 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:14,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845154859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:14,879 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/daec679b0dcf4eb28b3ac47bf480ad8f as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/daec679b0dcf4eb28b3ac47bf480ad8f 2024-12-10T15:38:14,884 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b74038778882593ac40a176deaf1ba7/B of 7b74038778882593ac40a176deaf1ba7 into daec679b0dcf4eb28b3ac47bf480ad8f(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
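Each flush above leaves a third store file behind, so the flusher queues system compactions and ExploringCompactionPolicy selects all three eligible files per store, rewriting them into a single ~11.9 K file. The same kind of minor compaction can be requested and observed from a client, roughly as sketched below (the connection setup is an assumption, not taken from the test):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestCompaction {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.compact(table); // asynchronously request a minor compaction of the table's stores
          CompactionState state = admin.getCompactionState(table); // NONE, MINOR, MAJOR, or MAJOR_AND_MINOR
          System.out.println("Compaction state for " + table + ": " + state);
        }
      }
    }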
2024-12-10T15:38:14,884 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:14,884 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., storeName=7b74038778882593ac40a176deaf1ba7/B, priority=13, startTime=1733845094825; duration=0sec 2024-12-10T15:38:14,884 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:14,884 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:B 2024-12-10T15:38:14,884 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:14,885 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:14,885 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 7b74038778882593ac40a176deaf1ba7/C is initiating minor compaction (all files) 2024-12-10T15:38:14,885 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b74038778882593ac40a176deaf1ba7/C in TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:14,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742332_1508 (size=14341) 2024-12-10T15:38:14,885 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/f30d0e58270d4b0f89f2b4405c6811b8, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/7f46083540d24255ae48587c9c2dedfa, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/fd51544e17ba44bc95688200e4e55c82] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp, totalSize=35.3 K 2024-12-10T15:38:14,885 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting f30d0e58270d4b0f89f2b4405c6811b8, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1733845091347 2024-12-10T15:38:14,885 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f46083540d24255ae48587c9c2dedfa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733845091376 2024-12-10T15:38:14,886 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting fd51544e17ba44bc95688200e4e55c82, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=90, earliestPutTs=1733845092509 2024-12-10T15:38:14,891 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/3559e476e2184ac782051a0b694eeb73 2024-12-10T15:38:14,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742333_1509 (size=12207) 2024-12-10T15:38:14,897 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b74038778882593ac40a176deaf1ba7#C#compaction#428 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:14,897 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/ebf50abddb664603bcaea11adbceb425 is 50, key is test_row_0/C:col10/1733845094639/Put/seqid=0 2024-12-10T15:38:14,905 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/47288bb1b8d4402681694c0c313d1837 is 50, key is test_row_0/B:col10/1733845094706/Put/seqid=0 2024-12-10T15:38:14,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742334_1510 (size=12207) 2024-12-10T15:38:14,924 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/ebf50abddb664603bcaea11adbceb425 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/ebf50abddb664603bcaea11adbceb425 2024-12-10T15:38:14,930 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b74038778882593ac40a176deaf1ba7/C of 7b74038778882593ac40a176deaf1ba7 into ebf50abddb664603bcaea11adbceb425(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
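Throughout this stretch the RPC handlers keep rejecting Mutate calls with RegionTooBusyException while the flushes and compactions catch up. The stock HBase client retries these internally, but a caller driving puts directly can add its own backoff; a rough sketch under the assumption that the exception reaches the caller unwrapped (table, row, and family names are taken from the entries above; the retry policy is invented for illustration):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoff {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);            // rejected while the region is over its memstore limit
              break;
            } catch (RegionTooBusyException e) {
              Thread.sleep(backoffMs);   // give the region server time to flush, then retry
              backoffMs *= 2;
            }
          }
        }
      }
    }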
2024-12-10T15:38:14,930 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:14,930 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., storeName=7b74038778882593ac40a176deaf1ba7/C, priority=13, startTime=1733845094825; duration=0sec 2024-12-10T15:38:14,930 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:14,930 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:C 2024-12-10T15:38:14,960 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:14,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845154959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:14,967 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:14,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845154966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:14,968 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:14,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845154966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:14,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742335_1511 (size=12001) 2024-12-10T15:38:15,170 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:15,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845155164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:15,170 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:15,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845155169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:15,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:15,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845155175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:15,302 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/c08b7f6fa7c34827a3ae34c4c7bbe3d9 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/c08b7f6fa7c34827a3ae34c4c7bbe3d9 2024-12-10T15:38:15,315 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b74038778882593ac40a176deaf1ba7/A of 7b74038778882593ac40a176deaf1ba7 into c08b7f6fa7c34827a3ae34c4c7bbe3d9(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
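The FlushTableProcedure entries in this section (pid=124 finishing above, pid=126 being created just below) are the master-side half of a client flush request; the log itself attributes them to "Client=jenkins" flushing TestAcidGuarantees and shows HBaseAdmin$TableFuture waiting for procId 124 to complete. Issuing the same request through the Admin API looks roughly like this (connection setup assumed):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asks the master to run a FlushTableProcedure (with per-region FlushRegionProcedure
          // subprocedures), matching the procId 124/126 entries in this log.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }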
2024-12-10T15:38:15,315 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:15,315 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., storeName=7b74038778882593ac40a176deaf1ba7/A, priority=13, startTime=1733845094825; duration=0sec 2024-12-10T15:38:15,315 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:15,315 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:A 2024-12-10T15:38:15,379 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/47288bb1b8d4402681694c0c313d1837 2024-12-10T15:38:15,401 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/8b870e21e3cd4e4fb404d0e2ba1bd10e is 50, key is test_row_0/C:col10/1733845094706/Put/seqid=0 2024-12-10T15:38:15,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-10T15:38:15,409 INFO [Thread-2178 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-12-10T15:38:15,410 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:38:15,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees 2024-12-10T15:38:15,411 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:38:15,412 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:38:15,412 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:38:15,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-10T15:38:15,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742336_1512 (size=12001) 2024-12-10T15:38:15,475 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:15,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845155473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:15,477 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:15,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845155473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:15,492 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:15,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845155490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:15,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-10T15:38:15,529 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:15,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43458 deadline: 1733845155527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:15,530 DEBUG [Thread-2167 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4152 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., hostname=bf0fec90ff6d,46239,1733844953049, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T15:38:15,539 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:15,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43420 deadline: 1733845155535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:15,567 DEBUG [Thread-2172 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4166 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., hostname=bf0fec90ff6d,46239,1733844953049, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T15:38:15,569 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:15,570 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-10T15:38:15,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:15,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:15,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:15,571 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:15,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:15,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:15,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-10T15:38:15,738 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:15,738 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-10T15:38:15,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
2024-12-10T15:38:15,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:15,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:15,738 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:15,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:38:15,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:38:15,852 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/8b870e21e3cd4e4fb404d0e2ba1bd10e 2024-12-10T15:38:15,858 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/3559e476e2184ac782051a0b694eeb73 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/3559e476e2184ac782051a0b694eeb73 2024-12-10T15:38:15,868 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/3559e476e2184ac782051a0b694eeb73, entries=200, sequenceid=115, filesize=14.0 K 2024-12-10T15:38:15,868 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/47288bb1b8d4402681694c0c313d1837 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/47288bb1b8d4402681694c0c313d1837 2024-12-10T15:38:15,873 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/47288bb1b8d4402681694c0c313d1837, entries=150, sequenceid=115, filesize=11.7 K 2024-12-10T15:38:15,873 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/8b870e21e3cd4e4fb404d0e2ba1bd10e as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/8b870e21e3cd4e4fb404d0e2ba1bd10e 2024-12-10T15:38:15,876 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/8b870e21e3cd4e4fb404d0e2ba1bd10e, entries=150, sequenceid=115, filesize=11.7 K 2024-12-10T15:38:15,877 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 7b74038778882593ac40a176deaf1ba7 in 1043ms, sequenceid=115, compaction requested=false 2024-12-10T15:38:15,877 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:15,890 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:15,892 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=127 2024-12-10T15:38:15,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:15,892 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2837): Flushing 7b74038778882593ac40a176deaf1ba7 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-10T15:38:15,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=A 2024-12-10T15:38:15,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:15,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=B 2024-12-10T15:38:15,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:15,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=C 2024-12-10T15:38:15,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:15,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/9ff81071c00c4a1eae400cf43b2593db is 50, key is test_row_0/A:col10/1733845094851/Put/seqid=0 2024-12-10T15:38:15,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742337_1513 (size=12001) 2024-12-10T15:38:15,959 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/9ff81071c00c4a1eae400cf43b2593db 2024-12-10T15:38:15,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/e77fa13d28d94f32a5dfc6c035207e26 is 50, key is test_row_0/B:col10/1733845094851/Put/seqid=0 2024-12-10T15:38:15,984 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
as already flushing 2024-12-10T15:38:15,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:16,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742338_1514 (size=12001) 2024-12-10T15:38:16,008 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/e77fa13d28d94f32a5dfc6c035207e26 2024-12-10T15:38:16,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-10T15:38:16,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/7e6f4626a61f4d16858263092208504f is 50, key is test_row_0/C:col10/1733845094851/Put/seqid=0 2024-12-10T15:38:16,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742339_1515 (size=12001) 2024-12-10T15:38:16,075 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/7e6f4626a61f4d16858263092208504f 2024-12-10T15:38:16,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/9ff81071c00c4a1eae400cf43b2593db as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/9ff81071c00c4a1eae400cf43b2593db 2024-12-10T15:38:16,088 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/9ff81071c00c4a1eae400cf43b2593db, entries=150, sequenceid=130, filesize=11.7 K 2024-12-10T15:38:16,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/e77fa13d28d94f32a5dfc6c035207e26 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/e77fa13d28d94f32a5dfc6c035207e26 2024-12-10T15:38:16,094 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 
{event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/e77fa13d28d94f32a5dfc6c035207e26, entries=150, sequenceid=130, filesize=11.7 K 2024-12-10T15:38:16,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/7e6f4626a61f4d16858263092208504f as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/7e6f4626a61f4d16858263092208504f 2024-12-10T15:38:16,101 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/7e6f4626a61f4d16858263092208504f, entries=150, sequenceid=130, filesize=11.7 K 2024-12-10T15:38:16,102 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=134.18 KB/137400 for 7b74038778882593ac40a176deaf1ba7 in 210ms, sequenceid=130, compaction requested=true 2024-12-10T15:38:16,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2538): Flush status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:16,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
2024-12-10T15:38:16,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=127 2024-12-10T15:38:16,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=127 2024-12-10T15:38:16,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:16,120 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b74038778882593ac40a176deaf1ba7 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-10T15:38:16,120 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=A 2024-12-10T15:38:16,120 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:16,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=B 2024-12-10T15:38:16,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:16,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=C 2024-12-10T15:38:16,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:16,126 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-12-10T15:38:16,126 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 695 msec 2024-12-10T15:38:16,128 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees in 717 msec 2024-12-10T15:38:16,133 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/c3fec18d5f7345628b9d503d91569895 is 50, key is test_row_0/A:col10/1733845096099/Put/seqid=0 2024-12-10T15:38:16,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742340_1516 (size=12151) 2024-12-10T15:38:16,158 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/c3fec18d5f7345628b9d503d91569895 2024-12-10T15:38:16,159 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:16,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845156147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:16,160 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:16,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845156149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:16,171 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:16,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845156159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:16,197 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/37f4cf8abd68483c8851bb0de67366df is 50, key is test_row_0/B:col10/1733845096099/Put/seqid=0 2024-12-10T15:38:16,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742341_1517 (size=12151) 2024-12-10T15:38:16,247 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/37f4cf8abd68483c8851bb0de67366df 2024-12-10T15:38:16,266 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:16,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:16,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845156260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:16,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845156262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:16,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:16,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845156273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:16,288 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/1f9b102138f94045af0d146d2058ddce is 50, key is test_row_0/C:col10/1733845096099/Put/seqid=0 2024-12-10T15:38:16,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742342_1518 (size=12151) 2024-12-10T15:38:16,470 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:16,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845156470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:16,471 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:16,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845156470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:16,486 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:16,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845156482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:16,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-10T15:38:16,516 INFO [Thread-2178 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-12-10T15:38:16,520 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:38:16,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees 2024-12-10T15:38:16,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-10T15:38:16,528 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:38:16,535 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:38:16,535 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:38:16,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-10T15:38:16,687 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:16,688 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-10T15:38:16,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:16,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:16,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:16,689 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:16,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
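
The repeated RegionTooBusyException rejections above ("Over memstore limit=512.0 K") come from the region's blocking-memstore check: writes are refused once a region's memstore exceeds the per-region flush size multiplied by the block multiplier. The 512 K figure reflects this test's scaled-down settings; production defaults are on the order of 128 MB x 4. A minimal configuration sketch, with key names taken from general HBase documentation rather than from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingConfig {
  // Returns a configuration whose blocking limit works out to 512 K,
  // matching the limit reported in the exceptions above (illustrative values).
  public static Configuration smallFlushConfig() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // flush a region's memstore at 128 K
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block writes at 4x the flush size = 512 K
    return conf;
  }
}
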
2024-12-10T15:38:16,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:38:16,737 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/1f9b102138f94045af0d146d2058ddce 2024-12-10T15:38:16,762 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/c3fec18d5f7345628b9d503d91569895 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/c3fec18d5f7345628b9d503d91569895 2024-12-10T15:38:16,772 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:16,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845156771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:16,774 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:16,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845156774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:16,780 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/c3fec18d5f7345628b9d503d91569895, entries=150, sequenceid=156, filesize=11.9 K 2024-12-10T15:38:16,791 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/37f4cf8abd68483c8851bb0de67366df as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/37f4cf8abd68483c8851bb0de67366df 2024-12-10T15:38:16,794 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:16,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845156789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:16,818 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/37f4cf8abd68483c8851bb0de67366df, entries=150, sequenceid=156, filesize=11.9 K 2024-12-10T15:38:16,824 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/1f9b102138f94045af0d146d2058ddce as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/1f9b102138f94045af0d146d2058ddce 2024-12-10T15:38:16,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-10T15:38:16,838 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/1f9b102138f94045af0d146d2058ddce, entries=150, sequenceid=156, filesize=11.9 K 2024-12-10T15:38:16,839 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 7b74038778882593ac40a176deaf1ba7 in 718ms, sequenceid=156, compaction requested=true 2024-12-10T15:38:16,839 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:16,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:38:16,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:16,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:38:16,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 
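
From a client's point of view, each rejected Mutate above surfaces as a RegionTooBusyException. The stock HBase client treats this as retriable and backs off internally, so explicit handling is normally unnecessary; the loop below is only a sketch of a manual retry for code that disables client retries. Class and method names other than the HBase ones are hypothetical.

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class BackoffPut {
  // Retry a single put with exponential backoff when the region reports
  // it is over its memstore limit, up to maxAttempts tries.
  static void putWithBackoff(Table table, Put put, int maxAttempts)
      throws IOException, InterruptedException {
    long sleepMs = 100;
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        if (attempt >= maxAttempts) {
          throw e; // give up after maxAttempts tries
        }
        Thread.sleep(sleepMs);                 // let the flush/compaction catch up
        sleepMs = Math.min(sleepMs * 2, 5_000); // cap the backoff at 5 s
      }
    }
  }
}
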
2024-12-10T15:38:16,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:38:16,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-10T15:38:16,839 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:38:16,839 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:38:16,842 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:16,843 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-10T15:38:16,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:16,843 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2837): Flushing 7b74038778882593ac40a176deaf1ba7 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-10T15:38:16,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=A 2024-12-10T15:38:16,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:16,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=B 2024-12-10T15:38:16,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:16,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=C 2024-12-10T15:38:16,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:16,845 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50700 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:38:16,845 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 7b74038778882593ac40a176deaf1ba7/A is initiating minor compaction (all files) 2024-12-10T15:38:16,845 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting 
compaction of 7b74038778882593ac40a176deaf1ba7/A in TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:16,845 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/c08b7f6fa7c34827a3ae34c4c7bbe3d9, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/3559e476e2184ac782051a0b694eeb73, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/9ff81071c00c4a1eae400cf43b2593db, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/c3fec18d5f7345628b9d503d91569895] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp, totalSize=49.5 K 2024-12-10T15:38:16,848 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48360 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:38:16,848 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 7b74038778882593ac40a176deaf1ba7/C is initiating minor compaction (all files) 2024-12-10T15:38:16,848 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b74038778882593ac40a176deaf1ba7/C in TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
2024-12-10T15:38:16,848 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/ebf50abddb664603bcaea11adbceb425, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/8b870e21e3cd4e4fb404d0e2ba1bd10e, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/7e6f4626a61f4d16858263092208504f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/1f9b102138f94045af0d146d2058ddce] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp, totalSize=47.2 K 2024-12-10T15:38:16,848 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting c08b7f6fa7c34827a3ae34c4c7bbe3d9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733845092509 2024-12-10T15:38:16,852 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting ebf50abddb664603bcaea11adbceb425, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733845092509 2024-12-10T15:38:16,852 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3559e476e2184ac782051a0b694eeb73, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733845094706 2024-12-10T15:38:16,859 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9ff81071c00c4a1eae400cf43b2593db, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733845094847 2024-12-10T15:38:16,859 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b870e21e3cd4e4fb404d0e2ba1bd10e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733845094706 2024-12-10T15:38:16,860 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e6f4626a61f4d16858263092208504f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733845094847 2024-12-10T15:38:16,860 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting c3fec18d5f7345628b9d503d91569895, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733845096099 2024-12-10T15:38:16,861 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f9b102138f94045af0d146d2058ddce, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733845096099 2024-12-10T15:38:16,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/ddcc1bdd1e294a408aec11d10f85200d is 50, key is 
test_row_0/A:col10/1733845096148/Put/seqid=0 2024-12-10T15:38:16,881 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b74038778882593ac40a176deaf1ba7#C#compaction#438 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:16,882 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/4c9cf326bc9d43a59a93f0af07976944 is 50, key is test_row_0/C:col10/1733845096099/Put/seqid=0 2024-12-10T15:38:16,885 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b74038778882593ac40a176deaf1ba7#A#compaction#439 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:16,886 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/96a02f1f7747426990050974dde06467 is 50, key is test_row_0/A:col10/1733845096099/Put/seqid=0 2024-12-10T15:38:16,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742344_1520 (size=12493) 2024-12-10T15:38:16,921 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/4c9cf326bc9d43a59a93f0af07976944 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/4c9cf326bc9d43a59a93f0af07976944 2024-12-10T15:38:16,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742343_1519 (size=12151) 2024-12-10T15:38:16,926 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/ddcc1bdd1e294a408aec11d10f85200d 2024-12-10T15:38:16,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742345_1521 (size=12493) 2024-12-10T15:38:16,927 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7b74038778882593ac40a176deaf1ba7/C of 7b74038778882593ac40a176deaf1ba7 into 4c9cf326bc9d43a59a93f0af07976944(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
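
The selection lines above ("Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking") are governed by the store-file count thresholds of the compaction policy. A hedged sketch of the relevant settings, with key names and default values taken from general HBase documentation, not read from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThresholds {
  // Common store-file thresholds (values shown are the usual 2.x defaults).
  public static Configuration defaults() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);      // minimum files before a minor compaction is considered
    conf.setInt("hbase.hstore.compaction.max", 10);     // most files folded into one minor compaction
    conf.setInt("hbase.hstore.blockingStoreFiles", 16); // writes block once a store reaches this many files
    return conf;
  }
}
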
2024-12-10T15:38:16,927 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:16,927 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., storeName=7b74038778882593ac40a176deaf1ba7/C, priority=12, startTime=1733845096839; duration=0sec 2024-12-10T15:38:16,927 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:16,927 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:C 2024-12-10T15:38:16,927 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:38:16,931 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48360 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:38:16,931 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 7b74038778882593ac40a176deaf1ba7/B is initiating minor compaction (all files) 2024-12-10T15:38:16,931 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b74038778882593ac40a176deaf1ba7/B in TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:16,932 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/daec679b0dcf4eb28b3ac47bf480ad8f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/47288bb1b8d4402681694c0c313d1837, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/e77fa13d28d94f32a5dfc6c035207e26, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/37f4cf8abd68483c8851bb0de67366df] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp, totalSize=47.2 K 2024-12-10T15:38:16,933 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting daec679b0dcf4eb28b3ac47bf480ad8f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1733845092509 2024-12-10T15:38:16,933 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 47288bb1b8d4402681694c0c313d1837, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733845094706 2024-12-10T15:38:16,934 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting e77fa13d28d94f32a5dfc6c035207e26, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=130, earliestPutTs=1733845094847 2024-12-10T15:38:16,934 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 37f4cf8abd68483c8851bb0de67366df, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733845096099 2024-12-10T15:38:16,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/cd3e8cb1848c4318bed6ed703f485c68 is 50, key is test_row_0/B:col10/1733845096148/Put/seqid=0 2024-12-10T15:38:16,947 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/96a02f1f7747426990050974dde06467 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/96a02f1f7747426990050974dde06467 2024-12-10T15:38:16,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742346_1522 (size=12151) 2024-12-10T15:38:16,987 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b74038778882593ac40a176deaf1ba7#B#compaction#441 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:16,987 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/df2f603f03854079bb72231ee870be3c is 50, key is test_row_0/B:col10/1733845096099/Put/seqid=0 2024-12-10T15:38:16,999 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7b74038778882593ac40a176deaf1ba7/A of 7b74038778882593ac40a176deaf1ba7 into 96a02f1f7747426990050974dde06467(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
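
These minor compactions were queued automatically by the flusher ("Small Compaction requested: system; Because: MemStoreFlusher.0"). The same work can also be requested explicitly through the Admin API; the sketch below assumes the table and column family names from this log, and both calls simply enqueue the request and return without waiting for completion.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompaction {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.compact(table);                           // queue a minor compaction for every region and store
      admin.majorCompact(table, Bytes.toBytes("A"));  // or a major compaction of a single column family
    }
  }
}
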
2024-12-10T15:38:16,999 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:16,999 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., storeName=7b74038778882593ac40a176deaf1ba7/A, priority=12, startTime=1733845096839; duration=0sec 2024-12-10T15:38:16,999 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:16,999 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:A 2024-12-10T15:38:17,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742347_1523 (size=12493) 2024-12-10T15:38:17,091 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/df2f603f03854079bb72231ee870be3c as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/df2f603f03854079bb72231ee870be3c 2024-12-10T15:38:17,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-10T15:38:17,140 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7b74038778882593ac40a176deaf1ba7/B of 7b74038778882593ac40a176deaf1ba7 into df2f603f03854079bb72231ee870be3c(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:38:17,141 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:17,141 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., storeName=7b74038778882593ac40a176deaf1ba7/B, priority=12, startTime=1733845096839; duration=0sec 2024-12-10T15:38:17,141 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:17,141 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:B 2024-12-10T15:38:17,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:17,284 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:17,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:17,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845157347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:17,356 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:17,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845157352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:17,356 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:17,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845157353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:17,357 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/cd3e8cb1848c4318bed6ed703f485c68 2024-12-10T15:38:17,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/223f9d79868c4449aadbba2f204a6a3a is 50, key is test_row_0/C:col10/1733845096148/Put/seqid=0 2024-12-10T15:38:17,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742348_1524 (size=12151) 2024-12-10T15:38:17,439 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/223f9d79868c4449aadbba2f204a6a3a 2024-12-10T15:38:17,465 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:17,465 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:17,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845157459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:17,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845157459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:17,465 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:17,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845157463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:17,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/ddcc1bdd1e294a408aec11d10f85200d as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/ddcc1bdd1e294a408aec11d10f85200d 2024-12-10T15:38:17,520 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/ddcc1bdd1e294a408aec11d10f85200d, entries=150, sequenceid=166, filesize=11.9 K 2024-12-10T15:38:17,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/cd3e8cb1848c4318bed6ed703f485c68 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/cd3e8cb1848c4318bed6ed703f485c68 2024-12-10T15:38:17,567 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/cd3e8cb1848c4318bed6ed703f485c68, entries=150, sequenceid=166, filesize=11.9 K 2024-12-10T15:38:17,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/223f9d79868c4449aadbba2f204a6a3a as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/223f9d79868c4449aadbba2f204a6a3a 2024-12-10T15:38:17,605 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/223f9d79868c4449aadbba2f204a6a3a, entries=150, sequenceid=166, filesize=11.9 K 2024-12-10T15:38:17,611 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 7b74038778882593ac40a176deaf1ba7 in 768ms, sequenceid=166, compaction requested=false 2024-12-10T15:38:17,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2538): Flush status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:17,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:17,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=129 2024-12-10T15:38:17,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=129 2024-12-10T15:38:17,624 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-12-10T15:38:17,624 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0840 sec 2024-12-10T15:38:17,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-10T15:38:17,627 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees in 1.1050 sec 2024-12-10T15:38:17,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:17,671 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b74038778882593ac40a176deaf1ba7 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-10T15:38:17,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=A 2024-12-10T15:38:17,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:17,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=B 2024-12-10T15:38:17,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): 
Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:17,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=C 2024-12-10T15:38:17,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:17,705 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/e62af4730961438eaca3d2d7b2ebc0c4 is 50, key is test_row_0/A:col10/1733845097667/Put/seqid=0 2024-12-10T15:38:17,705 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:17,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845157695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:17,705 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:17,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845157696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:17,713 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:17,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845157707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:17,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742349_1525 (size=12151) 2024-12-10T15:38:17,815 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:17,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845157809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:17,816 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:17,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845157812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:17,820 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:17,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845157817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:18,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:18,022 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:18,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845158019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:18,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845158019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:18,028 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:18,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845158026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:18,163 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/e62af4730961438eaca3d2d7b2ebc0c4 2024-12-10T15:38:18,181 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/8c136889fd804329b64afdbc569b0d4c is 50, key is test_row_0/B:col10/1733845097667/Put/seqid=0 2024-12-10T15:38:18,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742350_1526 (size=12151) 2024-12-10T15:38:18,235 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/8c136889fd804329b64afdbc569b0d4c 2024-12-10T15:38:18,286 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/9788429a77e14996993ee70176b2cb90 is 50, key is test_row_0/C:col10/1733845097667/Put/seqid=0 2024-12-10T15:38:18,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742351_1527 (size=12151) 2024-12-10T15:38:18,336 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:18,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845158331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:18,336 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:18,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845158333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:18,341 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:18,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845158334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:18,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-10T15:38:18,627 INFO [Thread-2178 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-12-10T15:38:18,651 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:38:18,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-12-10T15:38:18,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-10T15:38:18,667 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:38:18,668 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:38:18,668 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:38:18,734 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/9788429a77e14996993ee70176b2cb90 2024-12-10T15:38:18,738 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/e62af4730961438eaca3d2d7b2ebc0c4 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/e62af4730961438eaca3d2d7b2ebc0c4 2024-12-10T15:38:18,742 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/e62af4730961438eaca3d2d7b2ebc0c4, entries=150, sequenceid=197, filesize=11.9 K 2024-12-10T15:38:18,743 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/8c136889fd804329b64afdbc569b0d4c as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/8c136889fd804329b64afdbc569b0d4c 2024-12-10T15:38:18,747 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/8c136889fd804329b64afdbc569b0d4c, entries=150, sequenceid=197, filesize=11.9 K 2024-12-10T15:38:18,750 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/9788429a77e14996993ee70176b2cb90 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/9788429a77e14996993ee70176b2cb90 2024-12-10T15:38:18,754 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/9788429a77e14996993ee70176b2cb90, entries=150, sequenceid=197, filesize=11.9 K 2024-12-10T15:38:18,756 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 7b74038778882593ac40a176deaf1ba7 in 1085ms, sequenceid=197, compaction requested=true 2024-12-10T15:38:18,756 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:18,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:38:18,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:18,757 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:18,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
7b74038778882593ac40a176deaf1ba7:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:38:18,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:18,757 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:18,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:38:18,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:18,758 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:18,758 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 7b74038778882593ac40a176deaf1ba7/B is initiating minor compaction (all files) 2024-12-10T15:38:18,758 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b74038778882593ac40a176deaf1ba7/B in TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:18,758 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/df2f603f03854079bb72231ee870be3c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/cd3e8cb1848c4318bed6ed703f485c68, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/8c136889fd804329b64afdbc569b0d4c] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp, totalSize=35.9 K 2024-12-10T15:38:18,758 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:18,758 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 7b74038778882593ac40a176deaf1ba7/A is initiating minor compaction (all files) 2024-12-10T15:38:18,758 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b74038778882593ac40a176deaf1ba7/A in TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
2024-12-10T15:38:18,758 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/96a02f1f7747426990050974dde06467, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/ddcc1bdd1e294a408aec11d10f85200d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/e62af4730961438eaca3d2d7b2ebc0c4] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp, totalSize=35.9 K 2024-12-10T15:38:18,758 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 96a02f1f7747426990050974dde06467, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733845096099 2024-12-10T15:38:18,759 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting df2f603f03854079bb72231ee870be3c, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733845096099 2024-12-10T15:38:18,759 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting ddcc1bdd1e294a408aec11d10f85200d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1733845096146 2024-12-10T15:38:18,759 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting cd3e8cb1848c4318bed6ed703f485c68, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1733845096146 2024-12-10T15:38:18,759 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting e62af4730961438eaca3d2d7b2ebc0c4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733845097352 2024-12-10T15:38:18,759 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c136889fd804329b64afdbc569b0d4c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733845097352 2024-12-10T15:38:18,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-10T15:38:18,778 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b74038778882593ac40a176deaf1ba7#A#compaction#446 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:18,778 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/91a8b4b7d1ed4d468f993c2f8c3288f0 is 50, key is test_row_0/A:col10/1733845097667/Put/seqid=0 2024-12-10T15:38:18,789 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b74038778882593ac40a176deaf1ba7#B#compaction#447 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:18,790 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/007d3359c99a4e378fc6257964cb0387 is 50, key is test_row_0/B:col10/1733845097667/Put/seqid=0 2024-12-10T15:38:18,820 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:18,822 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-10T15:38:18,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:18,823 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing 7b74038778882593ac40a176deaf1ba7 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-10T15:38:18,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=A 2024-12-10T15:38:18,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:18,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=B 2024-12-10T15:38:18,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:18,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=C 2024-12-10T15:38:18,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:18,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742352_1528 (size=12595) 2024-12-10T15:38:18,836 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742353_1529 (size=12595) 2024-12-10T15:38:18,843 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/91a8b4b7d1ed4d468f993c2f8c3288f0 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/91a8b4b7d1ed4d468f993c2f8c3288f0 2024-12-10T15:38:18,843 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/007d3359c99a4e378fc6257964cb0387 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/007d3359c99a4e378fc6257964cb0387 2024-12-10T15:38:18,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/19b903afb72d48b291088d9984f25bbf is 50, key is test_row_0/A:col10/1733845097692/Put/seqid=0 2024-12-10T15:38:18,856 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b74038778882593ac40a176deaf1ba7/A of 7b74038778882593ac40a176deaf1ba7 into 91a8b4b7d1ed4d468f993c2f8c3288f0(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
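Note on the throttling entries above: the "average throughput is ... total limit is 50.00 MB/second" lines are emitted by throttle.PressureAwareThroughputController, which paces compaction writes against a bytes-per-second cap (here no pacing was needed, hence "slept 0 time(s)"). The sketch below is not the HBase implementation, only a minimal illustration of that pacing idea; the 50 MB/s figure is the one in the log, every name in the sketch is illustrative.

class SimpleThroughputThrottle {
  // Pace writes to at most maxBytesPerSecond, sleeping when the writer gets ahead,
  // mirroring the "slept N time(s) and total slept time is M ms" accounting above.
  private final double maxBytesPerSecond;
  private final long startNanos = System.nanoTime();
  private long totalBytes;

  SimpleThroughputThrottle(double maxBytesPerSecond) {
    this.maxBytesPerSecond = maxBytesPerSecond;   // e.g. 50 * 1024 * 1024 for 50 MB/s
  }

  // Returns how long this call slept, in milliseconds.
  long control(long bytesJustWritten) throws InterruptedException {
    totalBytes += bytesJustWritten;
    double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
    double earliestAllowedSec = totalBytes / maxBytesPerSecond;
    long sleepMs = (long) ((earliestAllowedSec - elapsedSec) * 1000);
    if (sleepMs <= 0) {
      return 0;                                   // still under the cap: no throttling needed
    }
    Thread.sleep(sleepMs);
    return sleepMs;
  }
}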
2024-12-10T15:38:18,856 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:18,856 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., storeName=7b74038778882593ac40a176deaf1ba7/A, priority=13, startTime=1733845098757; duration=0sec 2024-12-10T15:38:18,856 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:18,856 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:A 2024-12-10T15:38:18,856 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:18,858 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:18,859 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 7b74038778882593ac40a176deaf1ba7/C is initiating minor compaction (all files) 2024-12-10T15:38:18,859 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b74038778882593ac40a176deaf1ba7/C in TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:18,859 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/4c9cf326bc9d43a59a93f0af07976944, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/223f9d79868c4449aadbba2f204a6a3a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/9788429a77e14996993ee70176b2cb90] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp, totalSize=35.9 K 2024-12-10T15:38:18,860 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b74038778882593ac40a176deaf1ba7/B of 7b74038778882593ac40a176deaf1ba7 into 007d3359c99a4e378fc6257964cb0387(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
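Note on the selection entry above: "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" refers to the ratio test used when exploring candidate file sets, where a set is acceptable only if no single file is larger than the configured ratio times the combined size of the other files (hbase.hstore.compaction.ratio, 1.2 by default). The check below is a simplified sketch of that test, not the HBase code; the helper name is illustrative and the file sizes are only approximations of the ~12 K store files listed above.

import java.util.List;

class CompactionRatioCheck {
  // True when every file is at most `ratio` times the combined size of the others.
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    if (fileSizes.size() < 2) {
      return true;
    }
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;   // one file dominates the candidate set
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Roughly the three ~12 K files selected above (total size 36795 bytes in the log).
    System.out.println(filesInRatio(List.of(12_500L, 12_150L, 12_145L), 1.2));  // prints true
  }
}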
2024-12-10T15:38:18,860 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:18,860 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., storeName=7b74038778882593ac40a176deaf1ba7/B, priority=13, startTime=1733845098757; duration=0sec 2024-12-10T15:38:18,860 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:18,860 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:B 2024-12-10T15:38:18,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:18,863 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:18,863 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4c9cf326bc9d43a59a93f0af07976944, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733845096099 2024-12-10T15:38:18,864 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 223f9d79868c4449aadbba2f204a6a3a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1733845096146 2024-12-10T15:38:18,864 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9788429a77e14996993ee70176b2cb90, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733845097352 2024-12-10T15:38:18,881 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b74038778882593ac40a176deaf1ba7#C#compaction#449 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:18,881 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/4337be7a8e2c4c2594e9f9eb6d74e910 is 50, key is test_row_0/C:col10/1733845097667/Put/seqid=0 2024-12-10T15:38:18,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742354_1530 (size=12151) 2024-12-10T15:38:18,887 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/19b903afb72d48b291088d9984f25bbf 2024-12-10T15:38:18,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/f5feafa4e2b047a4b27e914fd119ca3e is 50, key is test_row_0/B:col10/1733845097692/Put/seqid=0 2024-12-10T15:38:18,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742355_1531 (size=12595) 2024-12-10T15:38:18,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742356_1532 (size=12151) 2024-12-10T15:38:18,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-10T15:38:18,968 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/4337be7a8e2c4c2594e9f9eb6d74e910 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/4337be7a8e2c4c2594e9f9eb6d74e910 2024-12-10T15:38:18,981 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:18,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845158973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:18,982 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:18,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845158973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:18,995 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:18,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845158983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:18,999 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b74038778882593ac40a176deaf1ba7/C of 7b74038778882593ac40a176deaf1ba7 into 4337be7a8e2c4c2594e9f9eb6d74e910(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:38:18,999 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:18,999 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., storeName=7b74038778882593ac40a176deaf1ba7/C, priority=13, startTime=1733845098757; duration=0sec 2024-12-10T15:38:18,999 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:18,999 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:C 2024-12-10T15:38:19,088 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:19,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845159082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:19,089 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:19,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845159083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:19,113 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:19,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845159108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:19,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-10T15:38:19,295 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:19,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845159291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:19,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:19,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845159291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:19,329 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:19,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845159327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:19,359 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/f5feafa4e2b047a4b27e914fd119ca3e 2024-12-10T15:38:19,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/a9e31e49551d4871a4428756327cfef2 is 50, key is test_row_0/C:col10/1733845097692/Put/seqid=0 2024-12-10T15:38:19,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742357_1533 (size=12151) 2024-12-10T15:38:19,448 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/a9e31e49551d4871a4428756327cfef2 2024-12-10T15:38:19,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/19b903afb72d48b291088d9984f25bbf as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/19b903afb72d48b291088d9984f25bbf 2024-12-10T15:38:19,495 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/19b903afb72d48b291088d9984f25bbf, entries=150, sequenceid=205, filesize=11.9 K 2024-12-10T15:38:19,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/f5feafa4e2b047a4b27e914fd119ca3e as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/f5feafa4e2b047a4b27e914fd119ca3e 2024-12-10T15:38:19,539 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/f5feafa4e2b047a4b27e914fd119ca3e, entries=150, sequenceid=205, filesize=11.9 K 2024-12-10T15:38:19,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/a9e31e49551d4871a4428756327cfef2 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/a9e31e49551d4871a4428756327cfef2 2024-12-10T15:38:19,570 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:19,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43458 deadline: 1733845159568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:19,576 DEBUG [Thread-2167 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8197 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., hostname=bf0fec90ff6d,46239,1733844953049, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T15:38:19,587 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/a9e31e49551d4871a4428756327cfef2, entries=150, sequenceid=205, filesize=11.9 K 2024-12-10T15:38:19,591 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for 
7b74038778882593ac40a176deaf1ba7 in 768ms, sequenceid=205, compaction requested=false 2024-12-10T15:38:19,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:19,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:19,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-12-10T15:38:19,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-12-10T15:38:19,600 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-12-10T15:38:19,600 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 931 msec 2024-12-10T15:38:19,602 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 950 msec 2024-12-10T15:38:19,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:19,606 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b74038778882593ac40a176deaf1ba7 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-12-10T15:38:19,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=A 2024-12-10T15:38:19,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:19,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=B 2024-12-10T15:38:19,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:19,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=C 2024-12-10T15:38:19,607 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:19,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:19,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43420 deadline: 1733845159627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:19,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:19,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845159631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:19,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:19,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845159634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:19,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:19,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845159637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:19,661 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/f526f0880fcf4f3ea1cf2413ec14998d is 50, key is test_row_0/A:col10/1733845098944/Put/seqid=0 2024-12-10T15:38:19,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742358_1534 (size=12151) 2024-12-10T15:38:19,741 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:19,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845159737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:19,749 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:19,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43420 deadline: 1733845159743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:19,749 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:19,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845159746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:19,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-10T15:38:19,769 INFO [Thread-2178 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-12-10T15:38:19,777 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:38:19,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-12-10T15:38:19,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-10T15:38:19,781 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:38:19,782 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:38:19,782 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:38:19,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-10T15:38:19,936 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:19,937 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-10T15:38:19,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:19,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:19,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:19,937 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:19,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:38:19,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:19,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:19,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845159944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:19,951 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:19,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845159950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:19,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:19,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43420 deadline: 1733845159957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:20,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-10T15:38:20,098 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:20,098 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-10T15:38:20,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:20,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:20,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:20,099 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:20,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:20,102 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/f526f0880fcf4f3ea1cf2413ec14998d 2024-12-10T15:38:20,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:20,126 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/feee833bc37345ada56674a800b0d40c is 50, key is test_row_0/B:col10/1733845098944/Put/seqid=0 2024-12-10T15:38:20,153 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:20,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845160148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:20,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742359_1535 (size=12151) 2024-12-10T15:38:20,246 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-10T15:38:20,252 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:20,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845160249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:20,255 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:20,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:20,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845160253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:20,269 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-10T15:38:20,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
2024-12-10T15:38:20,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:20,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:20,270 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:20,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:38:20,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:20,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:20,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43420 deadline: 1733845160270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:20,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-10T15:38:20,423 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:20,424 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-10T15:38:20,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:20,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:20,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:20,425 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:38:20,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:20,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:20,564 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/feee833bc37345ada56674a800b0d40c 2024-12-10T15:38:20,577 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:20,578 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-10T15:38:20,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:20,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:20,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:20,579 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:20,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:20,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:38:20,585 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/6576301b304d4ed4ab5485df32988590 is 50, key is test_row_0/C:col10/1733845098944/Put/seqid=0 2024-12-10T15:38:20,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742360_1536 (size=12151) 2024-12-10T15:38:20,732 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:20,733 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-10T15:38:20,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:20,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:20,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:20,734 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:20,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:20,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:20,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:20,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845160758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:20,763 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:20,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845160760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:20,781 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:20,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43420 deadline: 1733845160777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:20,886 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:20,886 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-10T15:38:20,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
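The RegionTooBusyException entries above show the region refusing new mutations because its memstore has crossed the blocking threshold of 512.0 K. That threshold is the product of the configured flush size and the blocking multiplier. The sketch below shows how such a small limit can arise; the 128 K flush size and multiplier of 4 are assumed test-style settings, since the actual values used by TestAcidGuarantees are not visible in this excerpt.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustration only: the blocking threshold behind "Over memstore limit=512.0 K"
// is flush size * block multiplier.
public class MemstoreLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed test-style settings -- not taken from this log excerpt.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);   // 128 K
    conf.setLong("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier;                      // 512 K with the values above
    System.out.println("memstore blocking limit = " + blockingLimit + " bytes");
  }
}
```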
2024-12-10T15:38:20,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:20,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:20,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-10T15:38:20,887 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:20,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:38:20,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:21,038 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/6576301b304d4ed4ab5485df32988590 2024-12-10T15:38:21,039 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:21,039 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-10T15:38:21,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
2024-12-10T15:38:21,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:21,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:21,040 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:21,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:38:21,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
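The block above repeats because the master keeps re-dispatching the same flush procedure (pid=133) while the region is still busy with the flush started by MemStoreFlusher: the region server answers "NOT flushing ... as already flushing", FlushRegionCallable turns that into an IOException, and the master logs "Remote procedure failed" and tries again until the in-progress flush finishes. The following is a schematic of that retry loop only, not the actual HBase procedure/dispatcher code; the sleep value is illustrative.

```java
import java.io.IOException;

// Schematic only -- shows the retry pattern visible in the log,
// not HBase's real procedure framework.
public class FlushRetrySketch {
  interface RemoteFlush {
    // Corresponds to "Executing remote procedure ... FlushRegionCallable, pid=133".
    void dispatch(long pid) throws IOException;
  }

  static void runUntilFlushed(RemoteFlush regionServer, long pid) throws InterruptedException {
    while (true) {
      try {
        regionServer.dispatch(pid);
        return; // flush accepted and completed
      } catch (IOException unableToCompleteFlush) {
        // Matches "Remote procedure failed, pid=133 ... Unable to complete flush":
        // the region was already flushing, so wait and dispatch again.
        Thread.sleep(150); // illustrative pause, not an HBase constant
      }
    }
  }
}
```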
2024-12-10T15:38:21,054 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/f526f0880fcf4f3ea1cf2413ec14998d as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/f526f0880fcf4f3ea1cf2413ec14998d 2024-12-10T15:38:21,068 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/f526f0880fcf4f3ea1cf2413ec14998d, entries=150, sequenceid=237, filesize=11.9 K 2024-12-10T15:38:21,079 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/feee833bc37345ada56674a800b0d40c as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/feee833bc37345ada56674a800b0d40c 2024-12-10T15:38:21,088 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/feee833bc37345ada56674a800b0d40c, entries=150, sequenceid=237, filesize=11.9 K 2024-12-10T15:38:21,093 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/6576301b304d4ed4ab5485df32988590 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/6576301b304d4ed4ab5485df32988590 2024-12-10T15:38:21,108 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/6576301b304d4ed4ab5485df32988590, entries=150, sequenceid=237, filesize=11.9 K 2024-12-10T15:38:21,108 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 7b74038778882593ac40a176deaf1ba7 in 1502ms, sequenceid=237, compaction requested=true 2024-12-10T15:38:21,108 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:21,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:38:21,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:21,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:38:21,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): 
Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-10T15:38:21,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:38:21,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-10T15:38:21,109 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:21,109 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:21,110 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:21,110 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 7b74038778882593ac40a176deaf1ba7/A is initiating minor compaction (all files) 2024-12-10T15:38:21,110 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b74038778882593ac40a176deaf1ba7/A in TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:21,110 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/91a8b4b7d1ed4d468f993c2f8c3288f0, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/19b903afb72d48b291088d9984f25bbf, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/f526f0880fcf4f3ea1cf2413ec14998d] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp, totalSize=36.0 K 2024-12-10T15:38:21,110 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:21,111 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 7b74038778882593ac40a176deaf1ba7/C is initiating minor compaction (all files) 2024-12-10T15:38:21,111 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b74038778882593ac40a176deaf1ba7/C in TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
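The "Exploring compaction algorithm has selected 3 files of size 36897 ... with 1 in ratio" lines above reflect the policy's in-ratio test: within a candidate window, every file must be no larger than the combined size of the other files times the configured ratio (1.2 unless overridden). A minimal sketch of that check follows; it is simplified (the real policy also enumerates and scores contiguous windows), and the file sizes are illustrative values chosen to approximate the ~36 K selections in this log.

```java
// Minimal sketch of the "files in ratio" test used when selecting store files
// for a minor compaction.
public class InRatioSketch {
  static boolean filesInRatio(long[] fileSizes, double ratio) {
    long total = 0;
    for (long s : fileSizes) total += s;
    for (long s : fileSizes) {
      // a file may not exceed `ratio` times the sum of the remaining files
      if (s > (total - s) * ratio) return false;
    }
    return true;
  }

  public static void main(String[] args) {
    // Illustrative sizes (~12 K each), one plausible decomposition of the 36897-byte selection.
    long[] sizes = {12595L, 12151L, 12151L};
    System.out.println(filesInRatio(sizes, 1.2)); // true -> all three files compact together
  }
}
```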
2024-12-10T15:38:21,111 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/4337be7a8e2c4c2594e9f9eb6d74e910, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/a9e31e49551d4871a4428756327cfef2, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/6576301b304d4ed4ab5485df32988590] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp, totalSize=36.0 K 2024-12-10T15:38:21,111 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 91a8b4b7d1ed4d468f993c2f8c3288f0, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733845097352 2024-12-10T15:38:21,111 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 4337be7a8e2c4c2594e9f9eb6d74e910, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733845097352 2024-12-10T15:38:21,111 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 19b903afb72d48b291088d9984f25bbf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1733845097679 2024-12-10T15:38:21,111 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting a9e31e49551d4871a4428756327cfef2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1733845097679 2024-12-10T15:38:21,112 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting f526f0880fcf4f3ea1cf2413ec14998d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733845098944 2024-12-10T15:38:21,112 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 6576301b304d4ed4ab5485df32988590, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733845098944 2024-12-10T15:38:21,143 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b74038778882593ac40a176deaf1ba7#C#compaction#455 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:21,143 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/f5ea53c863124e8085dbd1174739007a is 50, key is test_row_0/C:col10/1733845098944/Put/seqid=0 2024-12-10T15:38:21,146 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b74038778882593ac40a176deaf1ba7#A#compaction#456 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:21,146 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/ddd9e624a0614ecbbe0e2d387f73712f is 50, key is test_row_0/A:col10/1733845098944/Put/seqid=0 2024-12-10T15:38:21,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742361_1537 (size=12697) 2024-12-10T15:38:21,199 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:21,202 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b74038778882593ac40a176deaf1ba7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T15:38:21,203 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-10T15:38:21,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=A 2024-12-10T15:38:21,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:21,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=B 2024-12-10T15:38:21,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:21,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=C 2024-12-10T15:38:21,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:21,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:21,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:21,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:21,203 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:21,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:21,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:21,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742362_1538 (size=12697) 2024-12-10T15:38:21,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:21,217 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/f5ea53c863124e8085dbd1174739007a as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/f5ea53c863124e8085dbd1174739007a 2024-12-10T15:38:21,218 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/984c66a4508e4e2dab488853269612cd is 50, key is test_row_0/A:col10/1733845101201/Put/seqid=0 2024-12-10T15:38:21,225 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b74038778882593ac40a176deaf1ba7/C of 7b74038778882593ac40a176deaf1ba7 into f5ea53c863124e8085dbd1174739007a(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:38:21,225 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:21,225 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., storeName=7b74038778882593ac40a176deaf1ba7/C, priority=13, startTime=1733845101109; duration=0sec 2024-12-10T15:38:21,225 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:21,225 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:C 2024-12-10T15:38:21,226 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:21,227 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:21,227 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 7b74038778882593ac40a176deaf1ba7/B is initiating minor compaction (all files) 2024-12-10T15:38:21,228 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b74038778882593ac40a176deaf1ba7/B in TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:21,228 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/007d3359c99a4e378fc6257964cb0387, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/f5feafa4e2b047a4b27e914fd119ca3e, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/feee833bc37345ada56674a800b0d40c] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp, totalSize=36.0 K 2024-12-10T15:38:21,228 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 007d3359c99a4e378fc6257964cb0387, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733845097352 2024-12-10T15:38:21,228 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting f5feafa4e2b047a4b27e914fd119ca3e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1733845097679 2024-12-10T15:38:21,229 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting feee833bc37345ada56674a800b0d40c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733845098944 2024-12-10T15:38:21,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 
is added to blk_1073742363_1539 (size=16931) 2024-12-10T15:38:21,254 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/984c66a4508e4e2dab488853269612cd 2024-12-10T15:38:21,257 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b74038778882593ac40a176deaf1ba7#B#compaction#458 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:21,258 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/8e2779d4ee394107ac6361073a4069a6 is 50, key is test_row_0/B:col10/1733845098944/Put/seqid=0 2024-12-10T15:38:21,280 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/0411940ba7fa4125acbd209e92a33ef5 is 50, key is test_row_0/B:col10/1733845101201/Put/seqid=0 2024-12-10T15:38:21,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742364_1540 (size=12697) 2024-12-10T15:38:21,312 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/8e2779d4ee394107ac6361073a4069a6 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/8e2779d4ee394107ac6361073a4069a6 2024-12-10T15:38:21,322 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b74038778882593ac40a176deaf1ba7/B of 7b74038778882593ac40a176deaf1ba7 into 8e2779d4ee394107ac6361073a4069a6(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
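Each "Committing .../.tmp/<family>/<file> as .../<family>/<file>" line above is a flush or compaction output being moved from the region's temporary directory into the column-family directory, after which it is reported as "Added ..." or "Completed compaction ...". A simplified illustration of that commit step using the plain Hadoop FileSystem API is below; the paths are shortened stand-ins for the test-data paths in the log, and the real HStore also updates its in-memory store-file list, which is omitted here.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Simplified illustration of committing a flushed/compacted HFile:
// the file is written under the region's .tmp directory and then renamed
// into the store (column family) directory.
public class CommitStoreFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    Path regionDir = new Path("/hbase/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7");
    Path tmpFile = new Path(regionDir, ".tmp/B/8e2779d4ee394107ac6361073a4069a6");
    Path storeFile = new Path(regionDir, "B/8e2779d4ee394107ac6361073a4069a6");

    // Rename is atomic on HDFS, so readers see either the old or the new set of store files.
    if (!fs.rename(tmpFile, storeFile)) {
      throw new java.io.IOException("Failed to commit " + tmpFile + " as " + storeFile);
    }
  }
}
```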
2024-12-10T15:38:21,323 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:21,323 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., storeName=7b74038778882593ac40a176deaf1ba7/B, priority=13, startTime=1733845101109; duration=0sec 2024-12-10T15:38:21,323 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:21,323 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:B 2024-12-10T15:38:21,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742365_1541 (size=12151) 2024-12-10T15:38:21,325 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/0411940ba7fa4125acbd209e92a33ef5 2024-12-10T15:38:21,336 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/020ed929ce3841a18fbc52928f0c2dad is 50, key is test_row_0/C:col10/1733845101201/Put/seqid=0 2024-12-10T15:38:21,363 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:21,364 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-10T15:38:21,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:21,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:21,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:21,364 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:21,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:21,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742366_1542 (size=12151) 2024-12-10T15:38:21,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:21,372 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/020ed929ce3841a18fbc52928f0c2dad 2024-12-10T15:38:21,380 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/984c66a4508e4e2dab488853269612cd as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/984c66a4508e4e2dab488853269612cd 2024-12-10T15:38:21,384 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/984c66a4508e4e2dab488853269612cd, entries=250, sequenceid=248, filesize=16.5 K 2024-12-10T15:38:21,385 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/0411940ba7fa4125acbd209e92a33ef5 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/0411940ba7fa4125acbd209e92a33ef5 2024-12-10T15:38:21,420 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/0411940ba7fa4125acbd209e92a33ef5, entries=150, 
sequenceid=248, filesize=11.9 K 2024-12-10T15:38:21,420 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/020ed929ce3841a18fbc52928f0c2dad as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/020ed929ce3841a18fbc52928f0c2dad 2024-12-10T15:38:21,426 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/020ed929ce3841a18fbc52928f0c2dad, entries=150, sequenceid=248, filesize=11.9 K 2024-12-10T15:38:21,428 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 7b74038778882593ac40a176deaf1ba7 in 226ms, sequenceid=248, compaction requested=false 2024-12-10T15:38:21,428 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:21,433 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b74038778882593ac40a176deaf1ba7 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-10T15:38:21,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=A 2024-12-10T15:38:21,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:21,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=B 2024-12-10T15:38:21,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:21,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=C 2024-12-10T15:38:21,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:21,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:21,441 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/df595d7399e24735bfdb30e279657137 is 50, key is test_row_0/A:col10/1733845101431/Put/seqid=0 2024-12-10T15:38:21,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742367_1543 (size=14741) 2024-12-10T15:38:21,469 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/df595d7399e24735bfdb30e279657137 2024-12-10T15:38:21,480 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/e540db12406f4dae9ce05b0e187a38e5 is 50, key is test_row_0/B:col10/1733845101431/Put/seqid=0 2024-12-10T15:38:21,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:21,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845161480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:21,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742368_1544 (size=12301) 2024-12-10T15:38:21,497 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/e540db12406f4dae9ce05b0e187a38e5 2024-12-10T15:38:21,503 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/fd08018c9cf5426bbe8d4c820dfaaf07 is 50, key is test_row_0/C:col10/1733845101431/Put/seqid=0 2024-12-10T15:38:21,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742369_1545 (size=12301) 2024-12-10T15:38:21,520 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:21,520 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-10T15:38:21,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:21,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:21,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:21,521 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:21,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:38:21,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:38:21,526 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/fd08018c9cf5426bbe8d4c820dfaaf07 2024-12-10T15:38:21,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/df595d7399e24735bfdb30e279657137 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/df595d7399e24735bfdb30e279657137 2024-12-10T15:38:21,533 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/df595d7399e24735bfdb30e279657137, entries=200, sequenceid=276, filesize=14.4 K 2024-12-10T15:38:21,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/e540db12406f4dae9ce05b0e187a38e5 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/e540db12406f4dae9ce05b0e187a38e5 2024-12-10T15:38:21,546 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/e540db12406f4dae9ce05b0e187a38e5, entries=150, sequenceid=276, filesize=12.0 K 2024-12-10T15:38:21,547 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/fd08018c9cf5426bbe8d4c820dfaaf07 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/fd08018c9cf5426bbe8d4c820dfaaf07 2024-12-10T15:38:21,587 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/fd08018c9cf5426bbe8d4c820dfaaf07, entries=150, sequenceid=276, filesize=12.0 K 2024-12-10T15:38:21,588 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 7b74038778882593ac40a176deaf1ba7 in 155ms, sequenceid=276, compaction requested=true 2024-12-10T15:38:21,588 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:21,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:38:21,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:21,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:38:21,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:21,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:38:21,588 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-10T15:38:21,588 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 3 compacting, 2 eligible, 16 blocking 2024-12-10T15:38:21,589 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-10T15:38:21,589 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-10T15:38:21,589 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. because compaction request was cancelled 2024-12-10T15:38:21,589 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:A 2024-12-10T15:38:21,589 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:21,590 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:21,590 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 7b74038778882593ac40a176deaf1ba7/B is initiating minor compaction (all files) 2024-12-10T15:38:21,590 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b74038778882593ac40a176deaf1ba7/B in TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
2024-12-10T15:38:21,590 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/8e2779d4ee394107ac6361073a4069a6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/0411940ba7fa4125acbd209e92a33ef5, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/e540db12406f4dae9ce05b0e187a38e5] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp, totalSize=36.3 K 2024-12-10T15:38:21,591 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e2779d4ee394107ac6361073a4069a6, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733845098944 2024-12-10T15:38:21,591 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 0411940ba7fa4125acbd209e92a33ef5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733845099617 2024-12-10T15:38:21,591 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/ddd9e624a0614ecbbe0e2d387f73712f as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/ddd9e624a0614ecbbe0e2d387f73712f 2024-12-10T15:38:21,591 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting e540db12406f4dae9ce05b0e187a38e5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733845101408 2024-12-10T15:38:21,597 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b74038778882593ac40a176deaf1ba7/A of 7b74038778882593ac40a176deaf1ba7 into ddd9e624a0614ecbbe0e2d387f73712f(size=12.4 K), total size for store is 43.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:38:21,597 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:21,597 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., storeName=7b74038778882593ac40a176deaf1ba7/A, priority=13, startTime=1733845101108; duration=0sec 2024-12-10T15:38:21,597 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:21,597 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:A 2024-12-10T15:38:21,597 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b74038778882593ac40a176deaf1ba7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T15:38:21,597 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:21,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=A 2024-12-10T15:38:21,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:21,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=B 2024-12-10T15:38:21,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:21,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=C 2024-12-10T15:38:21,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:21,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:21,598 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:21,598 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 7b74038778882593ac40a176deaf1ba7/C is initiating minor compaction (all files) 2024-12-10T15:38:21,598 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b74038778882593ac40a176deaf1ba7/C in TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
2024-12-10T15:38:21,598 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/f5ea53c863124e8085dbd1174739007a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/020ed929ce3841a18fbc52928f0c2dad, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/fd08018c9cf5426bbe8d4c820dfaaf07] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp, totalSize=36.3 K 2024-12-10T15:38:21,601 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b74038778882593ac40a176deaf1ba7#B#compaction#464 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:21,601 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/9e90ab8379964adaab4d5cad2e18f14b is 50, key is test_row_0/B:col10/1733845101431/Put/seqid=0 2024-12-10T15:38:21,602 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting f5ea53c863124e8085dbd1174739007a, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733845098944 2024-12-10T15:38:21,602 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 020ed929ce3841a18fbc52928f0c2dad, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733845099617 2024-12-10T15:38:21,602 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd08018c9cf5426bbe8d4c820dfaaf07, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733845101408 2024-12-10T15:38:21,611 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/a0a40773f39b4ba18c81b74cc6ccf0bc is 50, key is test_row_0/A:col10/1733845101592/Put/seqid=0 2024-12-10T15:38:21,639 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b74038778882593ac40a176deaf1ba7#C#compaction#466 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:21,639 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/6decb32a68104a6590b17cd9ae6c7e13 is 50, key is test_row_0/C:col10/1733845101431/Put/seqid=0 2024-12-10T15:38:21,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742370_1546 (size=12949) 2024-12-10T15:38:21,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742371_1547 (size=12297) 2024-12-10T15:38:21,649 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/a0a40773f39b4ba18c81b74cc6ccf0bc 2024-12-10T15:38:21,656 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/9e90ab8379964adaab4d5cad2e18f14b as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/9e90ab8379964adaab4d5cad2e18f14b 2024-12-10T15:38:21,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742372_1548 (size=12949) 2024-12-10T15:38:21,660 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/927dac75d4d0465fabbe703401720b83 is 50, key is test_row_0/B:col10/1733845101592/Put/seqid=0 2024-12-10T15:38:21,663 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b74038778882593ac40a176deaf1ba7/B of 7b74038778882593ac40a176deaf1ba7 into 9e90ab8379964adaab4d5cad2e18f14b(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:38:21,664 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:21,664 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., storeName=7b74038778882593ac40a176deaf1ba7/B, priority=13, startTime=1733845101588; duration=0sec 2024-12-10T15:38:21,664 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:21,664 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:B 2024-12-10T15:38:21,670 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/6decb32a68104a6590b17cd9ae6c7e13 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/6decb32a68104a6590b17cd9ae6c7e13 2024-12-10T15:38:21,675 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:21,676 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-10T15:38:21,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:21,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:21,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:21,676 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:21,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:21,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:21,706 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b74038778882593ac40a176deaf1ba7/C of 7b74038778882593ac40a176deaf1ba7 into 6decb32a68104a6590b17cd9ae6c7e13(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:38:21,706 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:21,706 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., storeName=7b74038778882593ac40a176deaf1ba7/C, priority=13, startTime=1733845101588; duration=0sec 2024-12-10T15:38:21,706 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:21,706 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:C 2024-12-10T15:38:21,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742373_1549 (size=9857) 2024-12-10T15:38:21,711 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/927dac75d4d0465fabbe703401720b83 2024-12-10T15:38:21,724 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/5fb119b6ae374ee887c3e20c73982aa8 is 50, key is test_row_0/C:col10/1733845101592/Put/seqid=0 2024-12-10T15:38:21,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742374_1550 (size=9857) 2024-12-10T15:38:21,784 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/5fb119b6ae374ee887c3e20c73982aa8 2024-12-10T15:38:21,793 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/a0a40773f39b4ba18c81b74cc6ccf0bc as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/a0a40773f39b4ba18c81b74cc6ccf0bc 2024-12-10T15:38:21,796 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:21,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43420 deadline: 1733845161789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:21,798 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:21,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845161791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:21,799 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:21,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845161792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:21,799 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:21,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845161794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:21,802 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/a0a40773f39b4ba18c81b74cc6ccf0bc, entries=150, sequenceid=288, filesize=12.0 K 2024-12-10T15:38:21,803 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/927dac75d4d0465fabbe703401720b83 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/927dac75d4d0465fabbe703401720b83 2024-12-10T15:38:21,810 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/927dac75d4d0465fabbe703401720b83, entries=100, sequenceid=288, filesize=9.6 K 2024-12-10T15:38:21,819 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/5fb119b6ae374ee887c3e20c73982aa8 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/5fb119b6ae374ee887c3e20c73982aa8 2024-12-10T15:38:21,825 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/5fb119b6ae374ee887c3e20c73982aa8, entries=100, sequenceid=288, filesize=9.6 K 2024-12-10T15:38:21,826 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 7b74038778882593ac40a176deaf1ba7 in 229ms, sequenceid=288, 
compaction requested=true 2024-12-10T15:38:21,826 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:21,826 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:38:21,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:38:21,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:21,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:38:21,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:21,826 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-10T15:38:21,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:38:21,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:21,827 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-10T15:38:21,827 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-10T15:38:21,827 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. because compaction request was cancelled 2024-12-10T15:38:21,827 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:B 2024-12-10T15:38:21,827 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-10T15:38:21,827 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-10T15:38:21,827 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 
2024-12-10T15:38:21,827 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. because compaction request was cancelled 2024-12-10T15:38:21,827 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 56666 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:38:21,827 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:C 2024-12-10T15:38:21,827 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 7b74038778882593ac40a176deaf1ba7/A is initiating minor compaction (all files) 2024-12-10T15:38:21,827 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b74038778882593ac40a176deaf1ba7/A in TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:21,827 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/ddd9e624a0614ecbbe0e2d387f73712f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/984c66a4508e4e2dab488853269612cd, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/df595d7399e24735bfdb30e279657137, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/a0a40773f39b4ba18c81b74cc6ccf0bc] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp, totalSize=55.3 K 2024-12-10T15:38:21,828 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting ddd9e624a0614ecbbe0e2d387f73712f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733845098944 2024-12-10T15:38:21,828 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 984c66a4508e4e2dab488853269612cd, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733845099617 2024-12-10T15:38:21,828 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting df595d7399e24735bfdb30e279657137, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733845101355 2024-12-10T15:38:21,829 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting a0a40773f39b4ba18c81b74cc6ccf0bc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1733845101457 2024-12-10T15:38:21,835 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:21,835 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-10T15:38:21,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:21,836 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing 7b74038778882593ac40a176deaf1ba7 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-10T15:38:21,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=A 2024-12-10T15:38:21,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:21,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=B 2024-12-10T15:38:21,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:21,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=C 2024-12-10T15:38:21,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:21,845 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b74038778882593ac40a176deaf1ba7#A#compaction#469 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:21,846 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/e0f940a0db5143db8723636a1884f44d is 50, key is test_row_0/A:col10/1733845101592/Put/seqid=0 2024-12-10T15:38:21,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/da6adaf4efa146c38f22509cf16eb5d8 is 50, key is test_row_0/A:col10/1733845101774/Put/seqid=0 2024-12-10T15:38:21,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-10T15:38:21,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742375_1551 (size=12983) 2024-12-10T15:38:21,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:21,906 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:21,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742376_1552 (size=12301) 2024-12-10T15:38:21,925 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:21,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845161921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:21,931 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/e0f940a0db5143db8723636a1884f44d as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/e0f940a0db5143db8723636a1884f44d 2024-12-10T15:38:21,932 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:21,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845161924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:21,932 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:21,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845161925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:21,950 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7b74038778882593ac40a176deaf1ba7/A of 7b74038778882593ac40a176deaf1ba7 into e0f940a0db5143db8723636a1884f44d(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:38:21,950 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:21,950 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., storeName=7b74038778882593ac40a176deaf1ba7/A, priority=12, startTime=1733845101826; duration=0sec 2024-12-10T15:38:21,950 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:21,950 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:A 2024-12-10T15:38:22,030 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:22,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845162026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:22,033 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:22,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845162033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:22,034 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:22,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845162034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:22,235 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:22,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845162231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:22,236 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:22,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845162236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:22,237 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:22,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845162236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:22,308 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/da6adaf4efa146c38f22509cf16eb5d8 2024-12-10T15:38:22,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/4e1ff22e84984a398149ee28d77cd619 is 50, key is test_row_0/B:col10/1733845101774/Put/seqid=0 2024-12-10T15:38:22,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742377_1553 (size=12301) 2024-12-10T15:38:22,542 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:22,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845162537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:22,542 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:22,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845162539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:22,543 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:22,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845162539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:22,783 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/4e1ff22e84984a398149ee28d77cd619 2024-12-10T15:38:22,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/7fce6a196916498d866768e75fcbdb04 is 50, key is test_row_0/C:col10/1733845101774/Put/seqid=0 2024-12-10T15:38:22,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742378_1554 (size=12301) 2024-12-10T15:38:22,841 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/7fce6a196916498d866768e75fcbdb04 2024-12-10T15:38:22,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/da6adaf4efa146c38f22509cf16eb5d8 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/da6adaf4efa146c38f22509cf16eb5d8 2024-12-10T15:38:22,876 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/da6adaf4efa146c38f22509cf16eb5d8, entries=150, sequenceid=315, filesize=12.0 K 2024-12-10T15:38:22,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/4e1ff22e84984a398149ee28d77cd619 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/4e1ff22e84984a398149ee28d77cd619 2024-12-10T15:38:22,884 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/4e1ff22e84984a398149ee28d77cd619, entries=150, sequenceid=315, filesize=12.0 K 2024-12-10T15:38:22,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/7fce6a196916498d866768e75fcbdb04 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/7fce6a196916498d866768e75fcbdb04 2024-12-10T15:38:22,892 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/7fce6a196916498d866768e75fcbdb04, entries=150, sequenceid=315, filesize=12.0 K 2024-12-10T15:38:22,898 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 7b74038778882593ac40a176deaf1ba7 in 1062ms, sequenceid=315, compaction requested=true 2024-12-10T15:38:22,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:22,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
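[Note] The repeated RegionTooBusyException entries above come from HRegion.checkResources() rejecting puts while the region's memstore sits above its blocking limit (512.0 K in this test run); the client-side RpcRetryingCallerImpl simply retries those calls, as the "tries=6, retries=16" entry further below shows. The Java sketch that follows is only an illustration of that client-side behaviour, not part of the test output: the class name, the cell value, and the retry/pause settings (hbase.client.retries.number and hbase.client.pause, standard HBase client properties) are assumptions chosen for the example, while the table, row, family, and qualifier names are taken from the surrounding log lines.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TooBusyRetryExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // The retrying caller seen in this log (RpcRetryingCallerImpl, retries=16) is driven
        // by these client settings; the values here are illustrative, not the test's config.
        conf.setInt("hbase.client.retries.number", 16);
        conf.setLong("hbase.client.pause", 100L); // base pause in ms between retries

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            // Family "A" and qualifier "col10" match the cells written in this test log;
            // the value is a placeholder.
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("dummy-value"));
            // If the region's memstore is over its blocking limit, the server throws
            // RegionTooBusyException; the client backs off and retries transparently
            // until its retry budget or operation timeout is exhausted.
            table.put(put);
        }
    }
}

With a put issued this way, a RegionTooBusyException returned by the server is not surfaced to the caller immediately; the retrying caller pauses and tries again, which is the pattern visible in the CallRunner and RpcRetryingCallerImpl entries throughout this section of the log.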
2024-12-10T15:38:22,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-12-10T15:38:22,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-12-10T15:38:22,901 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-12-10T15:38:22,901 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.1180 sec 2024-12-10T15:38:22,903 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 3.1250 sec 2024-12-10T15:38:23,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:23,051 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b74038778882593ac40a176deaf1ba7 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-10T15:38:23,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=A 2024-12-10T15:38:23,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:23,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=B 2024-12-10T15:38:23,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:23,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=C 2024-12-10T15:38:23,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:23,066 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/05b6e822bc74474396a25b6c47e44f2f is 50, key is test_row_0/A:col10/1733845101923/Put/seqid=0 2024-12-10T15:38:23,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742379_1555 (size=14741) 2024-12-10T15:38:23,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:23,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845163200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:23,220 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:23,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845163211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:23,227 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:23,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845163223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:23,320 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:23,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845163319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:23,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:23,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845163322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:23,355 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:23,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845163343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:23,504 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/05b6e822bc74474396a25b6c47e44f2f 2024-12-10T15:38:23,527 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:23,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845163523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:23,536 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:23,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845163529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:23,546 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/c5c5f28945c24e1fb3804826458e0cc4 is 50, key is test_row_0/B:col10/1733845101923/Put/seqid=0 2024-12-10T15:38:23,562 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:23,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845163561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:23,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742380_1556 (size=12301) 2024-12-10T15:38:23,827 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:23,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43420 deadline: 1733845163826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:23,832 DEBUG [Thread-2172 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4204 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., hostname=bf0fec90ff6d,46239,1733844953049, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T15:38:23,836 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:23,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845163828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:23,844 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:23,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845163841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:23,880 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:23,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845163875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:23,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-10T15:38:23,892 INFO [Thread-2178 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-12-10T15:38:23,900 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:38:23,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-12-10T15:38:23,903 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:38:23,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-10T15:38:23,903 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:38:23,904 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:38:24,005 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at 
sequenceid=328 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/c5c5f28945c24e1fb3804826458e0cc4 2024-12-10T15:38:24,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-10T15:38:24,029 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/500b0eff31e9443dae597af19b226ade is 50, key is test_row_0/C:col10/1733845101923/Put/seqid=0 2024-12-10T15:38:24,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742381_1557 (size=12301) 2024-12-10T15:38:24,060 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/500b0eff31e9443dae597af19b226ade 2024-12-10T15:38:24,063 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:24,064 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-10T15:38:24,066 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/05b6e822bc74474396a25b6c47e44f2f as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/05b6e822bc74474396a25b6c47e44f2f 2024-12-10T15:38:24,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:24,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:24,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:24,067 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:24,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:24,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:24,070 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/05b6e822bc74474396a25b6c47e44f2f, entries=200, sequenceid=328, filesize=14.4 K 2024-12-10T15:38:24,071 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/c5c5f28945c24e1fb3804826458e0cc4 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/c5c5f28945c24e1fb3804826458e0cc4 2024-12-10T15:38:24,075 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/c5c5f28945c24e1fb3804826458e0cc4, entries=150, sequenceid=328, filesize=12.0 K 2024-12-10T15:38:24,076 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/500b0eff31e9443dae597af19b226ade as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/500b0eff31e9443dae597af19b226ade 2024-12-10T15:38:24,079 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/500b0eff31e9443dae597af19b226ade, entries=150, sequenceid=328, filesize=12.0 K 
2024-12-10T15:38:24,080 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 7b74038778882593ac40a176deaf1ba7 in 1029ms, sequenceid=328, compaction requested=true 2024-12-10T15:38:24,080 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:24,080 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:24,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:38:24,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:24,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:38:24,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:24,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:38:24,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-10T15:38:24,085 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:38:24,083 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40025 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:24,086 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 7b74038778882593ac40a176deaf1ba7/A is initiating minor compaction (all files) 2024-12-10T15:38:24,086 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b74038778882593ac40a176deaf1ba7/A in TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
2024-12-10T15:38:24,086 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/e0f940a0db5143db8723636a1884f44d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/da6adaf4efa146c38f22509cf16eb5d8, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/05b6e822bc74474396a25b6c47e44f2f] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp, totalSize=39.1 K 2024-12-10T15:38:24,086 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting e0f940a0db5143db8723636a1884f44d, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1733845101408 2024-12-10T15:38:24,086 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47408 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:38:24,086 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 7b74038778882593ac40a176deaf1ba7/B is initiating minor compaction (all files) 2024-12-10T15:38:24,086 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b74038778882593ac40a176deaf1ba7/B in TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
2024-12-10T15:38:24,087 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/9e90ab8379964adaab4d5cad2e18f14b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/927dac75d4d0465fabbe703401720b83, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/4e1ff22e84984a398149ee28d77cd619, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/c5c5f28945c24e1fb3804826458e0cc4] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp, totalSize=46.3 K 2024-12-10T15:38:24,087 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting da6adaf4efa146c38f22509cf16eb5d8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1733845101733 2024-12-10T15:38:24,087 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 9e90ab8379964adaab4d5cad2e18f14b, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733845101408 2024-12-10T15:38:24,087 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 05b6e822bc74474396a25b6c47e44f2f, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1733845101918 2024-12-10T15:38:24,087 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 927dac75d4d0465fabbe703401720b83, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1733845101478 2024-12-10T15:38:24,088 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e1ff22e84984a398149ee28d77cd619, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1733845101733 2024-12-10T15:38:24,088 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting c5c5f28945c24e1fb3804826458e0cc4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1733845101918 2024-12-10T15:38:24,101 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b74038778882593ac40a176deaf1ba7#A#compaction#476 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:24,101 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/77349b78d6654003b7ad619789767d64 is 50, key is test_row_0/A:col10/1733845101923/Put/seqid=0 2024-12-10T15:38:24,112 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b74038778882593ac40a176deaf1ba7#B#compaction#477 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:24,113 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/267db63be3ba4e27bc66a7e7d693ff64 is 50, key is test_row_0/B:col10/1733845101923/Put/seqid=0 2024-12-10T15:38:24,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742383_1559 (size=13085) 2024-12-10T15:38:24,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742382_1558 (size=13085) 2024-12-10T15:38:24,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-10T15:38:24,223 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/267db63be3ba4e27bc66a7e7d693ff64 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/267db63be3ba4e27bc66a7e7d693ff64 2024-12-10T15:38:24,231 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:24,231 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-10T15:38:24,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
2024-12-10T15:38:24,233 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing 7b74038778882593ac40a176deaf1ba7 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-10T15:38:24,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=A 2024-12-10T15:38:24,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:24,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=B 2024-12-10T15:38:24,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:24,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=C 2024-12-10T15:38:24,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:24,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/b5634853aa654ef4b7e18fed26e6acd1 is 50, key is test_row_0/A:col10/1733845103204/Put/seqid=0 2024-12-10T15:38:24,239 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7b74038778882593ac40a176deaf1ba7/B of 7b74038778882593ac40a176deaf1ba7 into 267db63be3ba4e27bc66a7e7d693ff64(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:38:24,239 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:24,239 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., storeName=7b74038778882593ac40a176deaf1ba7/B, priority=12, startTime=1733845104085; duration=0sec 2024-12-10T15:38:24,239 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:24,239 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:B 2024-12-10T15:38:24,239 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:38:24,240 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/77349b78d6654003b7ad619789767d64 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/77349b78d6654003b7ad619789767d64 2024-12-10T15:38:24,242 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47408 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:38:24,242 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 7b74038778882593ac40a176deaf1ba7/C is initiating minor compaction (all files) 2024-12-10T15:38:24,242 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b74038778882593ac40a176deaf1ba7/C in TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
2024-12-10T15:38:24,242 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/6decb32a68104a6590b17cd9ae6c7e13, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/5fb119b6ae374ee887c3e20c73982aa8, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/7fce6a196916498d866768e75fcbdb04, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/500b0eff31e9443dae597af19b226ade] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp, totalSize=46.3 K 2024-12-10T15:38:24,242 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 6decb32a68104a6590b17cd9ae6c7e13, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733845101408 2024-12-10T15:38:24,243 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 5fb119b6ae374ee887c3e20c73982aa8, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1733845101478 2024-12-10T15:38:24,243 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 7fce6a196916498d866768e75fcbdb04, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1733845101733 2024-12-10T15:38:24,244 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 500b0eff31e9443dae597af19b226ade, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1733845101918 2024-12-10T15:38:24,245 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b74038778882593ac40a176deaf1ba7/A of 7b74038778882593ac40a176deaf1ba7 into 77349b78d6654003b7ad619789767d64(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:38:24,245 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:24,245 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., storeName=7b74038778882593ac40a176deaf1ba7/A, priority=13, startTime=1733845104080; duration=0sec 2024-12-10T15:38:24,245 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:24,246 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:A 2024-12-10T15:38:24,262 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b74038778882593ac40a176deaf1ba7#C#compaction#479 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:24,263 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/5889091016bb457588f0fd0ad3a4c8d9 is 50, key is test_row_0/C:col10/1733845101923/Put/seqid=0 2024-12-10T15:38:24,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742384_1560 (size=12301) 2024-12-10T15:38:24,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742385_1561 (size=13085) 2024-12-10T15:38:24,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:24,343 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:24,381 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:24,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845164374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:24,382 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:24,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845164375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:24,383 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:24,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845164382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:24,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:24,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845164483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:24,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:24,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845164483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:24,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-10T15:38:24,667 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=352 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/b5634853aa654ef4b7e18fed26e6acd1 2024-12-10T15:38:24,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/76ded84aaf3f47bdb313478505ed80ea is 50, key is test_row_0/B:col10/1733845103204/Put/seqid=0 2024-12-10T15:38:24,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:24,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845164692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:24,705 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:24,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845164699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:24,708 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/5889091016bb457588f0fd0ad3a4c8d9 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/5889091016bb457588f0fd0ad3a4c8d9 2024-12-10T15:38:24,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742386_1562 (size=12301) 2024-12-10T15:38:24,733 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=352 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/76ded84aaf3f47bdb313478505ed80ea 2024-12-10T15:38:24,743 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7b74038778882593ac40a176deaf1ba7/C of 7b74038778882593ac40a176deaf1ba7 into 5889091016bb457588f0fd0ad3a4c8d9(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:38:24,743 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:24,743 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., storeName=7b74038778882593ac40a176deaf1ba7/C, priority=12, startTime=1733845104085; duration=0sec 2024-12-10T15:38:24,743 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:24,743 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:C 2024-12-10T15:38:24,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/4167c7deccb549e3a139838873b192a2 is 50, key is test_row_0/C:col10/1733845103204/Put/seqid=0 2024-12-10T15:38:24,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742387_1563 (size=12301) 2024-12-10T15:38:24,809 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=352 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/4167c7deccb549e3a139838873b192a2 2024-12-10T15:38:24,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/b5634853aa654ef4b7e18fed26e6acd1 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/b5634853aa654ef4b7e18fed26e6acd1 2024-12-10T15:38:24,817 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/b5634853aa654ef4b7e18fed26e6acd1, entries=150, sequenceid=352, filesize=12.0 K 2024-12-10T15:38:24,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/76ded84aaf3f47bdb313478505ed80ea as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/76ded84aaf3f47bdb313478505ed80ea 2024-12-10T15:38:24,823 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/76ded84aaf3f47bdb313478505ed80ea, entries=150, sequenceid=352, filesize=12.0 K 2024-12-10T15:38:24,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/4167c7deccb549e3a139838873b192a2 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/4167c7deccb549e3a139838873b192a2 2024-12-10T15:38:24,829 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/4167c7deccb549e3a139838873b192a2, entries=150, sequenceid=352, filesize=12.0 K 2024-12-10T15:38:24,835 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 7b74038778882593ac40a176deaf1ba7 in 603ms, sequenceid=352, compaction requested=false 2024-12-10T15:38:24,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:24,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
2024-12-10T15:38:24,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-12-10T15:38:24,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-12-10T15:38:24,837 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-12-10T15:38:24,837 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 933 msec 2024-12-10T15:38:24,838 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 938 msec 2024-12-10T15:38:24,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:24,998 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b74038778882593ac40a176deaf1ba7 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-10T15:38:24,998 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=A 2024-12-10T15:38:24,998 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:24,998 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=B 2024-12-10T15:38:24,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:24,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=C 2024-12-10T15:38:24,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:25,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-10T15:38:25,008 INFO [Thread-2178 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-12-10T15:38:25,009 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:38:25,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-12-10T15:38:25,014 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:38:25,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-10T15:38:25,015 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:38:25,015 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:38:25,016 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/fc7a5ffff255420aa24eb241787847a2 is 50, key is test_row_0/A:col10/1733845104374/Put/seqid=0 2024-12-10T15:38:25,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:25,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845165064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:25,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742388_1564 (size=14741) 2024-12-10T15:38:25,077 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:25,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845165073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:25,083 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/fc7a5ffff255420aa24eb241787847a2 2024-12-10T15:38:25,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-10T15:38:25,125 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/f913faced63844a29c12162cf5a86d15 is 50, key is test_row_0/B:col10/1733845104374/Put/seqid=0 2024-12-10T15:38:25,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742389_1565 (size=12301) 2024-12-10T15:38:25,155 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/f913faced63844a29c12162cf5a86d15 2024-12-10T15:38:25,166 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:25,166 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-10T15:38:25,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:25,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
as already flushing 2024-12-10T15:38:25,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:25,167 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:25,167 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:25,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:25,176 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/efacf21384984c868c794ee4c2cbe72a is 50, key is test_row_0/C:col10/1733845104374/Put/seqid=0 2024-12-10T15:38:25,180 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:25,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845165174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:25,187 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:25,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845165179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:25,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742390_1566 (size=12301) 2024-12-10T15:38:25,200 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/efacf21384984c868c794ee4c2cbe72a 2024-12-10T15:38:25,220 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/fc7a5ffff255420aa24eb241787847a2 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/fc7a5ffff255420aa24eb241787847a2 2024-12-10T15:38:25,229 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/fc7a5ffff255420aa24eb241787847a2, entries=200, sequenceid=368, filesize=14.4 K 2024-12-10T15:38:25,231 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/f913faced63844a29c12162cf5a86d15 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/f913faced63844a29c12162cf5a86d15 2024-12-10T15:38:25,238 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/f913faced63844a29c12162cf5a86d15, entries=150, sequenceid=368, filesize=12.0 K 2024-12-10T15:38:25,240 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/efacf21384984c868c794ee4c2cbe72a as 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/efacf21384984c868c794ee4c2cbe72a 2024-12-10T15:38:25,250 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/efacf21384984c868c794ee4c2cbe72a, entries=150, sequenceid=368, filesize=12.0 K 2024-12-10T15:38:25,252 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 7b74038778882593ac40a176deaf1ba7 in 254ms, sequenceid=368, compaction requested=true 2024-12-10T15:38:25,252 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:25,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:38:25,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:25,252 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:25,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:38:25,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:25,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:38:25,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-10T15:38:25,255 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:25,258 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40127 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:25,258 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 7b74038778882593ac40a176deaf1ba7/A is initiating minor compaction (all files) 2024-12-10T15:38:25,258 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b74038778882593ac40a176deaf1ba7/A in TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
2024-12-10T15:38:25,258 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/77349b78d6654003b7ad619789767d64, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/b5634853aa654ef4b7e18fed26e6acd1, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/fc7a5ffff255420aa24eb241787847a2] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp, totalSize=39.2 K 2024-12-10T15:38:25,260 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 77349b78d6654003b7ad619789767d64, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1733845101918 2024-12-10T15:38:25,263 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting b5634853aa654ef4b7e18fed26e6acd1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1733845103199 2024-12-10T15:38:25,267 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:25,267 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 7b74038778882593ac40a176deaf1ba7/B is initiating minor compaction (all files) 2024-12-10T15:38:25,267 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc7a5ffff255420aa24eb241787847a2, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1733845104358 2024-12-10T15:38:25,267 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b74038778882593ac40a176deaf1ba7/B in TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
2024-12-10T15:38:25,267 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/267db63be3ba4e27bc66a7e7d693ff64, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/76ded84aaf3f47bdb313478505ed80ea, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/f913faced63844a29c12162cf5a86d15] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp, totalSize=36.8 K 2024-12-10T15:38:25,271 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 267db63be3ba4e27bc66a7e7d693ff64, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1733845101918 2024-12-10T15:38:25,279 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 76ded84aaf3f47bdb313478505ed80ea, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1733845103199 2024-12-10T15:38:25,283 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting f913faced63844a29c12162cf5a86d15, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1733845104366 2024-12-10T15:38:25,309 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b74038778882593ac40a176deaf1ba7#A#compaction#485 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:25,310 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/cddb0e0b2edb4970ba33b91f9f8d5e0b is 50, key is test_row_0/A:col10/1733845104374/Put/seqid=0 2024-12-10T15:38:25,316 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b74038778882593ac40a176deaf1ba7#B#compaction#486 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:25,316 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/28456c23727c480095ee21a0da981a83 is 50, key is test_row_0/B:col10/1733845104374/Put/seqid=0 2024-12-10T15:38:25,319 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:25,319 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-10T15:38:25,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:25,320 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing 7b74038778882593ac40a176deaf1ba7 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-10T15:38:25,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=A 2024-12-10T15:38:25,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:25,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=B 2024-12-10T15:38:25,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:25,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=C 2024-12-10T15:38:25,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:25,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-10T15:38:25,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/4b2ccfee14104fd9bca6a3378f02138c is 50, key is test_row_0/A:col10/1733845105062/Put/seqid=0 2024-12-10T15:38:25,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742391_1567 (size=13187) 2024-12-10T15:38:25,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742392_1568 
(size=13187) 2024-12-10T15:38:25,406 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:25,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:25,415 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/28456c23727c480095ee21a0da981a83 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/28456c23727c480095ee21a0da981a83 2024-12-10T15:38:25,415 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/cddb0e0b2edb4970ba33b91f9f8d5e0b as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/cddb0e0b2edb4970ba33b91f9f8d5e0b 2024-12-10T15:38:25,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742393_1569 (size=12301) 2024-12-10T15:38:25,440 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b74038778882593ac40a176deaf1ba7/B of 7b74038778882593ac40a176deaf1ba7 into 28456c23727c480095ee21a0da981a83(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:38:25,440 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:25,440 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., storeName=7b74038778882593ac40a176deaf1ba7/B, priority=13, startTime=1733845105252; duration=0sec 2024-12-10T15:38:25,440 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:25,440 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:B 2024-12-10T15:38:25,440 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:25,441 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:25,441 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 7b74038778882593ac40a176deaf1ba7/C is initiating minor compaction (all files) 2024-12-10T15:38:25,441 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b74038778882593ac40a176deaf1ba7/C in TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:25,441 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/5889091016bb457588f0fd0ad3a4c8d9, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/4167c7deccb549e3a139838873b192a2, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/efacf21384984c868c794ee4c2cbe72a] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp, totalSize=36.8 K 2024-12-10T15:38:25,442 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 5889091016bb457588f0fd0ad3a4c8d9, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1733845101918 2024-12-10T15:38:25,442 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 4167c7deccb549e3a139838873b192a2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1733845103199 2024-12-10T15:38:25,442 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting efacf21384984c868c794ee4c2cbe72a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1733845104366 2024-12-10T15:38:25,443 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 
(all) file(s) in 7b74038778882593ac40a176deaf1ba7/A of 7b74038778882593ac40a176deaf1ba7 into cddb0e0b2edb4970ba33b91f9f8d5e0b(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:38:25,443 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:25,443 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., storeName=7b74038778882593ac40a176deaf1ba7/A, priority=13, startTime=1733845105252; duration=0sec 2024-12-10T15:38:25,443 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:25,443 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:A 2024-12-10T15:38:25,463 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:25,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845165459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:25,463 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:25,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845165461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:25,464 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:25,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845165463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:25,469 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b74038778882593ac40a176deaf1ba7#C#compaction#488 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:25,469 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/c7416b35a6b34131aec9efb32ff9c17e is 50, key is test_row_0/C:col10/1733845104374/Put/seqid=0 2024-12-10T15:38:25,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742394_1570 (size=13187) 2024-12-10T15:38:25,540 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/c7416b35a6b34131aec9efb32ff9c17e as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/c7416b35a6b34131aec9efb32ff9c17e 2024-12-10T15:38:25,545 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b74038778882593ac40a176deaf1ba7/C of 7b74038778882593ac40a176deaf1ba7 into c7416b35a6b34131aec9efb32ff9c17e(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:38:25,545 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:25,545 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., storeName=7b74038778882593ac40a176deaf1ba7/C, priority=13, startTime=1733845105252; duration=0sec 2024-12-10T15:38:25,545 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:25,545 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:C 2024-12-10T15:38:25,570 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:25,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845165566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:25,571 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:25,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:25,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845165566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:25,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845165566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:25,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-10T15:38:25,779 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:25,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845165776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:25,779 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:25,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845165776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:25,780 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:25,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845165777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:25,824 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=391 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/4b2ccfee14104fd9bca6a3378f02138c 2024-12-10T15:38:25,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/2d9631305bf241fb858448907f7a3c99 is 50, key is test_row_0/B:col10/1733845105062/Put/seqid=0 2024-12-10T15:38:25,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742395_1571 (size=12301) 2024-12-10T15:38:25,880 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=391 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/2d9631305bf241fb858448907f7a3c99 2024-12-10T15:38:25,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/7cae231e6e45488b8570062183a4835f is 50, key is test_row_0/C:col10/1733845105062/Put/seqid=0 2024-12-10T15:38:25,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742396_1572 (size=12301) 2024-12-10T15:38:25,961 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=391 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/7cae231e6e45488b8570062183a4835f 2024-12-10T15:38:26,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/4b2ccfee14104fd9bca6a3378f02138c as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/4b2ccfee14104fd9bca6a3378f02138c 2024-12-10T15:38:26,040 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/4b2ccfee14104fd9bca6a3378f02138c, entries=150, sequenceid=391, filesize=12.0 K 2024-12-10T15:38:26,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/2d9631305bf241fb858448907f7a3c99 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/2d9631305bf241fb858448907f7a3c99 2024-12-10T15:38:26,064 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/2d9631305bf241fb858448907f7a3c99, entries=150, sequenceid=391, filesize=12.0 K 2024-12-10T15:38:26,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/7cae231e6e45488b8570062183a4835f as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/7cae231e6e45488b8570062183a4835f 2024-12-10T15:38:26,077 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/7cae231e6e45488b8570062183a4835f, entries=150, sequenceid=391, filesize=12.0 K 2024-12-10T15:38:26,078 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 7b74038778882593ac40a176deaf1ba7 in 758ms, sequenceid=391, compaction requested=false 2024-12-10T15:38:26,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 
{event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:26,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:26,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-12-10T15:38:26,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-12-10T15:38:26,086 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b74038778882593ac40a176deaf1ba7 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-10T15:38:26,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=A 2024-12-10T15:38:26,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:26,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=B 2024-12-10T15:38:26,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:26,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=C 2024-12-10T15:38:26,087 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:26,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:26,093 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-12-10T15:38:26,093 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0720 sec 2024-12-10T15:38:26,095 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 1.0850 sec 2024-12-10T15:38:26,108 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/a2276425178d485d961d289eb2cedb02 is 50, key is test_row_0/A:col10/1733845105460/Put/seqid=0 2024-12-10T15:38:26,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-10T15:38:26,122 INFO [Thread-2178 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-12-10T15:38:26,123 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:38:26,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, 
state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-12-10T15:38:26,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-10T15:38:26,125 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:38:26,127 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:38:26,127 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:38:26,138 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:26,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845166130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:26,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742397_1573 (size=14741) 2024-12-10T15:38:26,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:26,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845166138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:26,142 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:26,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845166136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:26,149 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/a2276425178d485d961d289eb2cedb02 2024-12-10T15:38:26,196 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/0f47382c955143f5b3185817e5442546 is 50, key is test_row_0/B:col10/1733845105460/Put/seqid=0 2024-12-10T15:38:26,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-10T15:38:26,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742398_1574 (size=12301) 2024-12-10T15:38:26,240 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:26,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845166240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:26,248 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:26,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845166244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:26,249 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:26,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845166244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:26,279 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:26,280 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-10T15:38:26,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:26,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:26,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:26,280 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:26,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:26,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:26,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-10T15:38:26,434 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:26,435 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-10T15:38:26,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:26,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:26,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:26,435 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:38:26,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:26,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:26,456 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:26,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845166455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:26,457 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:26,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845166456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:26,464 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:26,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845166457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:26,590 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:26,591 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-10T15:38:26,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
2024-12-10T15:38:26,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:26,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:26,593 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:26,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:38:26,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
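The repeated RegionTooBusyException entries in this log come from HRegion.checkResources rejecting writes once the region's memstore passes its blocking limit, reported here as 512.0 K for region 7b74038778882593ac40a176deaf1ba7. The exact test configuration is not visible in the log, so the following is only a minimal sketch of how that blocking threshold is commonly derived from hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; the 128 KB flush size and multiplier of 4 are assumed values chosen because their product matches the 512 K figure above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreBlockingLimitSketch {
        public static void main(String[] args) {
            // Hypothetical values picked to reproduce the 512.0 K limit seen in the log;
            // the real TestAcidGuarantees configuration is not shown here.
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // assumed 128 KB
            conf.setLong("hbase.hregion.memstore.block.multiplier", 4L);    // assumed default multiplier

            long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
            long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);

            // Writes are refused with RegionTooBusyException once the region's memstore
            // grows past flushSize * multiplier (512 KB with these assumed values).
            long blockingLimit = flushSize * multiplier;
            System.out.println("blocking memstore limit = " + (blockingLimit / 1024.0) + " K");
        }
    }

With these assumed settings the computed limit matches the "Over memstore limit=512.0 K" messages, which is why the Mutate calls keep failing until the in-flight flushes (pid=137/138/139 above) drain the memstore.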
2024-12-10T15:38:26,628 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/0f47382c955143f5b3185817e5442546 2024-12-10T15:38:26,654 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/0d9e4119fced490189b1c714a50dc849 is 50, key is test_row_0/C:col10/1733845105460/Put/seqid=0 2024-12-10T15:38:26,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742399_1575 (size=12301) 2024-12-10T15:38:26,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-10T15:38:26,745 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:26,747 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-10T15:38:26,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:26,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:26,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:26,748 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:38:26,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:26,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:26,761 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:26,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845166758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:26,770 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:26,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845166765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:26,774 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:26,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845166767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:26,903 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:26,903 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-10T15:38:26,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
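The RegionTooBusyException entries above are server-side backpressure: HRegion.checkResources() rejects new mutations once the region's memstore passes its blocking limit (512 K here; in a stock setup that limit is hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier, so this test has presumably lowered the flush size). Callers are expected to back off and retry while the flush drains the memstore. A minimal client-side sketch, assuming client retries are dialed down (hbase.client.retries.number) so the exception actually reaches the caller rather than being retried internally; the table, row and column names are taken from the log, everything else is illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put); // rejected while the region's memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException e) {
          // Server-side backpressure: wait for the in-flight flush to drain the
          // memstore, then try again with a growing backoff. Depending on client
          // retry settings the exception may instead arrive wrapped.
          Thread.sleep(100L * attempt);
        }
      }
    }
  }
}
```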
2024-12-10T15:38:26,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:26,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:26,904 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:26,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
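The repeating pid=139 failures are the master's FlushRegionProcedure bouncing off a region that is already mid-flush ("NOT flushing ... as already flushing"): the region server reports the IOException, HMaster logs "Remote procedure failed", and the dispatcher re-sends the callable until it succeeds (which it does further down, at 15:38:27,543). From the outside, this whole cycle is normally started by a single admin call; a minimal sketch, with only the table name taken from the log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Kicks off a flush like the FlushTableProcedure (pid=138) above; the master
      // drives the per-region subprocedures and retries any region that is busy.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```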
2024-12-10T15:38:26,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:27,063 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:27,064 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-10T15:38:27,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:27,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
as already flushing 2024-12-10T15:38:27,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:27,064 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:27,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:27,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:27,105 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/0d9e4119fced490189b1c714a50dc849 2024-12-10T15:38:27,120 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/a2276425178d485d961d289eb2cedb02 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/a2276425178d485d961d289eb2cedb02 2024-12-10T15:38:27,131 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/a2276425178d485d961d289eb2cedb02, entries=200, sequenceid=408, filesize=14.4 K 2024-12-10T15:38:27,137 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/0f47382c955143f5b3185817e5442546 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/0f47382c955143f5b3185817e5442546 2024-12-10T15:38:27,141 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/0f47382c955143f5b3185817e5442546, entries=150, sequenceid=408, filesize=12.0 K 2024-12-10T15:38:27,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/0d9e4119fced490189b1c714a50dc849 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/0d9e4119fced490189b1c714a50dc849 2024-12-10T15:38:27,158 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/0d9e4119fced490189b1c714a50dc849, entries=150, sequenceid=408, filesize=12.0 K 2024-12-10T15:38:27,165 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 7b74038778882593ac40a176deaf1ba7 in 1079ms, sequenceid=408, compaction requested=true 2024-12-10T15:38:27,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:27,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:38:27,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:27,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:38:27,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-10T15:38:27,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:38:27,169 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-10T15:38:27,169 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:27,169 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:27,178 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:27,178 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 7b74038778882593ac40a176deaf1ba7/C is initiating minor 
compaction (all files) 2024-12-10T15:38:27,178 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b74038778882593ac40a176deaf1ba7/C in TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:27,178 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/c7416b35a6b34131aec9efb32ff9c17e, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/7cae231e6e45488b8570062183a4835f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/0d9e4119fced490189b1c714a50dc849] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp, totalSize=36.9 K 2024-12-10T15:38:27,178 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40229 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:27,178 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 7b74038778882593ac40a176deaf1ba7/A is initiating minor compaction (all files) 2024-12-10T15:38:27,178 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b74038778882593ac40a176deaf1ba7/A in TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
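At this point each store (A, B, C) has three eligible HFiles, which meets the usual minor-compaction minimum, so ExploringCompactionPolicy selects all three files per store. The selection is driven by ordinary configuration keys; the sketch below shows the common upstream defaults (illustrative values only, not necessarily what this test configures):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);        // minimum store files before a minor compaction is considered
    conf.setInt("hbase.hstore.compaction.max", 10);       // cap on files compacted in one pass
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // size ratio used by ExploringCompactionPolicy
    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", 3));
  }
}
```

Lowering compaction.min makes small flush outputs compact sooner, at the cost of extra write amplification.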
2024-12-10T15:38:27,178 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/cddb0e0b2edb4970ba33b91f9f8d5e0b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/4b2ccfee14104fd9bca6a3378f02138c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/a2276425178d485d961d289eb2cedb02] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp, totalSize=39.3 K 2024-12-10T15:38:27,178 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting c7416b35a6b34131aec9efb32ff9c17e, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1733845104366 2024-12-10T15:38:27,187 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 7cae231e6e45488b8570062183a4835f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=391, earliestPutTs=1733845105050 2024-12-10T15:38:27,189 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting cddb0e0b2edb4970ba33b91f9f8d5e0b, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1733845104366 2024-12-10T15:38:27,189 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d9e4119fced490189b1c714a50dc849, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1733845105412 2024-12-10T15:38:27,189 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b2ccfee14104fd9bca6a3378f02138c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=391, earliestPutTs=1733845105050 2024-12-10T15:38:27,191 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting a2276425178d485d961d289eb2cedb02, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1733845105412 2024-12-10T15:38:27,219 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:27,224 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-10T15:38:27,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
2024-12-10T15:38:27,224 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing 7b74038778882593ac40a176deaf1ba7 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-10T15:38:27,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=A 2024-12-10T15:38:27,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:27,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=B 2024-12-10T15:38:27,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:27,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=C 2024-12-10T15:38:27,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:27,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-10T15:38:27,237 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b74038778882593ac40a176deaf1ba7#C#compaction#494 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:27,237 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/b80cf02888534571b3d9dd2cca531c2e is 50, key is test_row_0/C:col10/1733845105460/Put/seqid=0 2024-12-10T15:38:27,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/a72266238afa4cdb8b6ee0d1f978796b is 50, key is test_row_0/A:col10/1733845106137/Put/seqid=0 2024-12-10T15:38:27,246 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b74038778882593ac40a176deaf1ba7#A#compaction#496 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:27,247 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/9b57b35979be4bac9faf859e409eef0a is 50, key is test_row_0/A:col10/1733845105460/Put/seqid=0 2024-12-10T15:38:27,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:27,281 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:27,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742400_1576 (size=13289) 2024-12-10T15:38:27,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742402_1578 (size=13289) 2024-12-10T15:38:27,326 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:27,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845167317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:27,329 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/b80cf02888534571b3d9dd2cca531c2e as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/b80cf02888534571b3d9dd2cca531c2e 2024-12-10T15:38:27,336 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:27,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845167326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:27,336 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:27,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845167326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:27,349 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b74038778882593ac40a176deaf1ba7/C of 7b74038778882593ac40a176deaf1ba7 into b80cf02888534571b3d9dd2cca531c2e(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
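The "Committing ....tmp/... as ..." lines, both for the flush outputs and for the compacted file that just finished above, follow the usual write-to-temp-then-rename pattern on HDFS, so readers never observe a half-written HFile. A generic sketch of that pattern using the plain Hadoop FileSystem API with placeholder paths (not HBase's actual HRegionFileSystem code):

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path tmp = new Path("/data/default/ExampleTable/region/.tmp/C/newfile"); // placeholder
    Path dst = new Path("/data/default/ExampleTable/region/C/newfile");      // placeholder
    // The writer finishes the file under .tmp first; the rename is what makes
    // it visible, so concurrent readers only ever see complete files.
    if (!fs.rename(tmp, dst)) {
      throw new IOException("failed to commit " + tmp + " to " + dst);
    }
  }
}
```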
2024-12-10T15:38:27,349 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:27,349 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., storeName=7b74038778882593ac40a176deaf1ba7/C, priority=13, startTime=1733845107169; duration=0sec 2024-12-10T15:38:27,350 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:27,350 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:C 2024-12-10T15:38:27,350 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:27,350 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:27,351 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 7b74038778882593ac40a176deaf1ba7/B is initiating minor compaction (all files) 2024-12-10T15:38:27,351 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b74038778882593ac40a176deaf1ba7/B in TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:27,351 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/28456c23727c480095ee21a0da981a83, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/2d9631305bf241fb858448907f7a3c99, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/0f47382c955143f5b3185817e5442546] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp, totalSize=36.9 K 2024-12-10T15:38:27,351 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 28456c23727c480095ee21a0da981a83, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1733845104366 2024-12-10T15:38:27,351 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d9631305bf241fb858448907f7a3c99, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=391, earliestPutTs=1733845105050 2024-12-10T15:38:27,351 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 0f47382c955143f5b3185817e5442546, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1733845105412 2024-12-10T15:38:27,371 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
7b74038778882593ac40a176deaf1ba7#B#compaction#497 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:27,371 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/7aa7b6969cf14a068b0137459322b1c0 is 50, key is test_row_0/B:col10/1733845105460/Put/seqid=0 2024-12-10T15:38:27,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742401_1577 (size=12301) 2024-12-10T15:38:27,376 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=430 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/a72266238afa4cdb8b6ee0d1f978796b 2024-12-10T15:38:27,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742403_1579 (size=13289) 2024-12-10T15:38:27,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/891527e911b245ffb5018cb51a6fb6e1 is 50, key is test_row_0/B:col10/1733845106137/Put/seqid=0 2024-12-10T15:38:27,440 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:27,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845167432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:27,440 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:27,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845167438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:27,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742404_1580 (size=12301) 2024-12-10T15:38:27,448 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=430 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/891527e911b245ffb5018cb51a6fb6e1 2024-12-10T15:38:27,449 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:27,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845167448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:27,451 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/7aa7b6969cf14a068b0137459322b1c0 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/7aa7b6969cf14a068b0137459322b1c0 2024-12-10T15:38:27,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/8d940558096c41c89a8f8813828d3b88 is 50, key is test_row_0/C:col10/1733845106137/Put/seqid=0 2024-12-10T15:38:27,462 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b74038778882593ac40a176deaf1ba7/B of 7b74038778882593ac40a176deaf1ba7 into 7aa7b6969cf14a068b0137459322b1c0(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
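This interleaving of puts, flushes and compactions across families A, B and C is exactly what TestAcidGuarantees exercises: a reader must always see a row whose A, B and C cells come from the same atomic write, no matter what the store files are doing underneath. A hedged sketch of that style of check (row and column names follow the log's test_row_0/col10 pattern; the actual test's reader logic may differ):

```java
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RowConsistencyCheckSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Result r = table.get(new Get(Bytes.toBytes("test_row_0")));
      byte[] a = r.getValue(Bytes.toBytes("A"), Bytes.toBytes("col10"));
      byte[] b = r.getValue(Bytes.toBytes("B"), Bytes.toBytes("col10"));
      byte[] c = r.getValue(Bytes.toBytes("C"), Bytes.toBytes("col10"));
      // If writers always put the same value into A, B and C in one Put,
      // a single Get must never observe a mix of old and new values.
      if (!Arrays.equals(a, b) || !Arrays.equals(b, c)) {
        throw new AssertionError("row is internally inconsistent");
      }
    }
  }
}
```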
2024-12-10T15:38:27,462 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:27,462 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., storeName=7b74038778882593ac40a176deaf1ba7/B, priority=13, startTime=1733845107169; duration=0sec 2024-12-10T15:38:27,462 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:27,462 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:B 2024-12-10T15:38:27,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742405_1581 (size=12301) 2024-12-10T15:38:27,492 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=430 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/8d940558096c41c89a8f8813828d3b88 2024-12-10T15:38:27,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/a72266238afa4cdb8b6ee0d1f978796b as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/a72266238afa4cdb8b6ee0d1f978796b 2024-12-10T15:38:27,520 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/a72266238afa4cdb8b6ee0d1f978796b, entries=150, sequenceid=430, filesize=12.0 K 2024-12-10T15:38:27,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/891527e911b245ffb5018cb51a6fb6e1 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/891527e911b245ffb5018cb51a6fb6e1 2024-12-10T15:38:27,532 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/891527e911b245ffb5018cb51a6fb6e1, entries=150, sequenceid=430, filesize=12.0 K 2024-12-10T15:38:27,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/8d940558096c41c89a8f8813828d3b88 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/8d940558096c41c89a8f8813828d3b88 2024-12-10T15:38:27,541 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/8d940558096c41c89a8f8813828d3b88, entries=150, sequenceid=430, filesize=12.0 K 2024-12-10T15:38:27,543 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 7b74038778882593ac40a176deaf1ba7 in 319ms, sequenceid=430, compaction requested=false 2024-12-10T15:38:27,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:27,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:27,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-12-10T15:38:27,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-12-10T15:38:27,555 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-12-10T15:38:27,555 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4240 sec 2024-12-10T15:38:27,559 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 1.4320 sec 2024-12-10T15:38:27,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:27,647 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b74038778882593ac40a176deaf1ba7 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-10T15:38:27,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=A 2024-12-10T15:38:27,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:27,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=B 2024-12-10T15:38:27,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:27,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
7b74038778882593ac40a176deaf1ba7, store=C 2024-12-10T15:38:27,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:27,653 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/0ae1014a9f5748c7b88fd1cba12f6ff8 is 50, key is test_row_0/A:col10/1733845107323/Put/seqid=0 2024-12-10T15:38:27,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742406_1582 (size=17181) 2024-12-10T15:38:27,712 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:27,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845167703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:27,713 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:27,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845167704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:27,728 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:27,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845167711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:27,732 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/9b57b35979be4bac9faf859e409eef0a as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/9b57b35979be4bac9faf859e409eef0a 2024-12-10T15:38:27,748 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b74038778882593ac40a176deaf1ba7/A of 7b74038778882593ac40a176deaf1ba7 into 9b57b35979be4bac9faf859e409eef0a(size=13.0 K), total size for store is 25.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:38:27,748 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:27,748 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., storeName=7b74038778882593ac40a176deaf1ba7/A, priority=13, startTime=1733845107169; duration=0sec 2024-12-10T15:38:27,748 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:27,748 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:A 2024-12-10T15:38:27,817 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:27,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845167813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:27,817 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:27,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845167813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:27,831 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:27,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845167829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:27,869 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:27,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43420 deadline: 1733845167866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:27,871 DEBUG [Thread-2172 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8244 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., hostname=bf0fec90ff6d,46239,1733844953049, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T15:38:28,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:28,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845168019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:28,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:28,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845168021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:28,037 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:28,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845168033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:28,100 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=449 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/0ae1014a9f5748c7b88fd1cba12f6ff8 2024-12-10T15:38:28,112 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/f831b69473ae47d9902a74adeaeb7c08 is 50, key is test_row_0/B:col10/1733845107323/Put/seqid=0 2024-12-10T15:38:28,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742407_1583 (size=12301) 2024-12-10T15:38:28,157 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=449 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/f831b69473ae47d9902a74adeaeb7c08 2024-12-10T15:38:28,177 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/2bb8140e33294b70b3f70df8d9125760 is 50, key is test_row_0/C:col10/1733845107323/Put/seqid=0 2024-12-10T15:38:28,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742408_1584 (size=12301) 2024-12-10T15:38:28,204 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=449 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/2bb8140e33294b70b3f70df8d9125760 2024-12-10T15:38:28,209 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/0ae1014a9f5748c7b88fd1cba12f6ff8 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/0ae1014a9f5748c7b88fd1cba12f6ff8 2024-12-10T15:38:28,213 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/0ae1014a9f5748c7b88fd1cba12f6ff8, entries=250, sequenceid=449, filesize=16.8 K 2024-12-10T15:38:28,213 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/f831b69473ae47d9902a74adeaeb7c08 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/f831b69473ae47d9902a74adeaeb7c08 2024-12-10T15:38:28,216 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/f831b69473ae47d9902a74adeaeb7c08, entries=150, sequenceid=449, filesize=12.0 K 2024-12-10T15:38:28,217 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/2bb8140e33294b70b3f70df8d9125760 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/2bb8140e33294b70b3f70df8d9125760 2024-12-10T15:38:28,226 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/2bb8140e33294b70b3f70df8d9125760, entries=150, sequenceid=449, filesize=12.0 K 2024-12-10T15:38:28,226 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 7b74038778882593ac40a176deaf1ba7 in 579ms, sequenceid=449, compaction requested=true 2024-12-10T15:38:28,227 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:28,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:38:28,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), 
splitQueue=0 2024-12-10T15:38:28,227 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:28,227 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:28,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:38:28,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:28,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7b74038778882593ac40a176deaf1ba7:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:38:28,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:28,228 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42771 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:28,228 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 7b74038778882593ac40a176deaf1ba7/A is initiating minor compaction (all files) 2024-12-10T15:38:28,228 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b74038778882593ac40a176deaf1ba7/A in TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:28,228 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/9b57b35979be4bac9faf859e409eef0a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/a72266238afa4cdb8b6ee0d1f978796b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/0ae1014a9f5748c7b88fd1cba12f6ff8] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp, totalSize=41.8 K 2024-12-10T15:38:28,228 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:28,228 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): 7b74038778882593ac40a176deaf1ba7/B is initiating minor compaction (all files) 2024-12-10T15:38:28,228 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b74038778882593ac40a176deaf1ba7/B in TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
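The entries above show MemStoreFlusher committing the flushed HFiles out of .tmp and CompactSplit immediately queueing work: SortedCompactionPolicy finds 3 eligible store files per column family and ExploringCompactionPolicy selects all of them for a minor compaction. The same flush-then-compact cycle can also be requested explicitly; below is a minimal sketch, assuming a standard HBase client with a reachable cluster configuration on the classpath. Only the table name is taken from the log; everything else is illustrative and not part of this test's code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushThenCompactSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();           // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.flush(table);    // flush memstores to HFiles, as the FlushTableProcedure does above
      admin.compact(table);  // request a minor compaction; the region server's policy picks the files
    }
  }
}
```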
2024-12-10T15:38:28,228 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9b57b35979be4bac9faf859e409eef0a, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1733845105412 2024-12-10T15:38:28,228 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/7aa7b6969cf14a068b0137459322b1c0, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/891527e911b245ffb5018cb51a6fb6e1, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/f831b69473ae47d9902a74adeaeb7c08] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp, totalSize=37.0 K 2024-12-10T15:38:28,228 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting a72266238afa4cdb8b6ee0d1f978796b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=430, earliestPutTs=1733845106129 2024-12-10T15:38:28,229 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 7aa7b6969cf14a068b0137459322b1c0, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1733845105412 2024-12-10T15:38:28,229 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0ae1014a9f5748c7b88fd1cba12f6ff8, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=449, earliestPutTs=1733845107311 2024-12-10T15:38:28,229 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 891527e911b245ffb5018cb51a6fb6e1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=430, earliestPutTs=1733845106129 2024-12-10T15:38:28,230 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting f831b69473ae47d9902a74adeaeb7c08, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=449, earliestPutTs=1733845107323 2024-12-10T15:38:28,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-10T15:38:28,236 INFO [Thread-2178 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-12-10T15:38:28,236 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b74038778882593ac40a176deaf1ba7#A#compaction#503 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:28,236 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/fd89ca87c40149eeb3493051255c94b4 is 50, key is test_row_0/A:col10/1733845107323/Put/seqid=0 2024-12-10T15:38:28,237 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:38:28,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-12-10T15:38:28,238 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:38:28,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-10T15:38:28,239 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:38:28,239 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:38:28,249 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b74038778882593ac40a176deaf1ba7#B#compaction#504 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:28,249 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/ab6ccee37612491c88b5ba0872743420 is 50, key is test_row_0/B:col10/1733845107323/Put/seqid=0 2024-12-10T15:38:28,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742409_1585 (size=13391) 2024-12-10T15:38:28,273 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/fd89ca87c40149eeb3493051255c94b4 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/fd89ca87c40149eeb3493051255c94b4 2024-12-10T15:38:28,278 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b74038778882593ac40a176deaf1ba7/A of 7b74038778882593ac40a176deaf1ba7 into fd89ca87c40149eeb3493051255c94b4(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
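Interleaved with these flushes and compactions, the RPC handlers keep rejecting Mutate calls with RegionTooBusyException because the region's memstore is over its blocking limit (reported here as 512.0 K, a deliberately small test-sized value), while the client's RpcRetryingCallerImpl retries with backoff (tries=7, retries=16 in the entry above). The following is a minimal client-side sketch of the write path that hits this limit, assuming a standard HBase client; the row and column family come from the log, but the retry and pause values are illustrative assumptions, not this test's actual settings.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetriedPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 16);  // matches the retries=16 seen in the log
    conf.setLong("hbase.client.pause", 100);         // illustrative base pause (ms) between retries
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_2"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // If the region is over its memstore blocking limit, the server answers with
      // RegionTooBusyException and the client retries internally before this call returns.
      table.put(put);
    }
  }
}
```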
2024-12-10T15:38:28,278 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:28,278 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., storeName=7b74038778882593ac40a176deaf1ba7/A, priority=13, startTime=1733845108227; duration=0sec 2024-12-10T15:38:28,278 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:28,278 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:A 2024-12-10T15:38:28,278 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:28,279 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:28,279 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): 7b74038778882593ac40a176deaf1ba7/C is initiating minor compaction (all files) 2024-12-10T15:38:28,279 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7b74038778882593ac40a176deaf1ba7/C in TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:28,279 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/b80cf02888534571b3d9dd2cca531c2e, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/8d940558096c41c89a8f8813828d3b88, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/2bb8140e33294b70b3f70df8d9125760] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp, totalSize=37.0 K 2024-12-10T15:38:28,280 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting b80cf02888534571b3d9dd2cca531c2e, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1733845105412 2024-12-10T15:38:28,282 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8d940558096c41c89a8f8813828d3b88, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=430, earliestPutTs=1733845106129 2024-12-10T15:38:28,282 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2bb8140e33294b70b3f70df8d9125760, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=449, earliestPutTs=1733845107323 2024-12-10T15:38:28,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46053 is added to blk_1073742410_1586 (size=13391) 2024-12-10T15:38:28,289 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7b74038778882593ac40a176deaf1ba7#C#compaction#505 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:28,289 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/bf48975b07964e3aaf63424fd3c3b7dc is 50, key is test_row_0/C:col10/1733845107323/Put/seqid=0 2024-12-10T15:38:28,300 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/ab6ccee37612491c88b5ba0872743420 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/ab6ccee37612491c88b5ba0872743420 2024-12-10T15:38:28,303 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b74038778882593ac40a176deaf1ba7/B of 7b74038778882593ac40a176deaf1ba7 into ab6ccee37612491c88b5ba0872743420(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:38:28,303 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:28,303 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., storeName=7b74038778882593ac40a176deaf1ba7/B, priority=13, startTime=1733845108227; duration=0sec 2024-12-10T15:38:28,303 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:28,303 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:B 2024-12-10T15:38:28,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742411_1587 (size=13391) 2024-12-10T15:38:28,328 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7b74038778882593ac40a176deaf1ba7 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-10T15:38:28,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=A 2024-12-10T15:38:28,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:28,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=B 2024-12-10T15:38:28,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:28,328 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=C 2024-12-10T15:38:28,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:28,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:28,341 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/558eedc8b86649a7b85dfd66e3cb10f3 is 50, key is test_row_0/A:col10/1733845107703/Put/seqid=0 2024-12-10T15:38:28,342 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/bf48975b07964e3aaf63424fd3c3b7dc as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/bf48975b07964e3aaf63424fd3c3b7dc 2024-12-10T15:38:28,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-10T15:38:28,348 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7b74038778882593ac40a176deaf1ba7/C of 7b74038778882593ac40a176deaf1ba7 into bf48975b07964e3aaf63424fd3c3b7dc(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:38:28,348 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:28,348 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7., storeName=7b74038778882593ac40a176deaf1ba7/C, priority=13, startTime=1733845108227; duration=0sec 2024-12-10T15:38:28,348 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:28,348 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7b74038778882593ac40a176deaf1ba7:C 2024-12-10T15:38:28,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742412_1588 (size=14741) 2024-12-10T15:38:28,377 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=472 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/558eedc8b86649a7b85dfd66e3cb10f3 2024-12-10T15:38:28,380 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:28,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845168372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:28,385 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/c0a7cba407534861bff78dde000d0dcf is 50, key is test_row_0/B:col10/1733845107703/Put/seqid=0 2024-12-10T15:38:28,390 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:28,392 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-10T15:38:28,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:28,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:28,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
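At this point the region is under pressure from two sides: MemStoreFlusher is already flushing all 3 column families, so the master-driven FlushRegionProcedure (pid=141) finds the region "already flushing" and bails out, while writers continue to bounce off the 512.0 K memstore blocking limit. That limit is derived from per-region memstore settings; a hedged configuration sketch follows, using the real configuration keys but default-sized values that are assumptions here, not what this test actually configures.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region memstore flush threshold (default 128 MB; this test clearly runs with a much
    // smaller value, since the blocking limit reported in the log is only 512 K).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // Writers are blocked with RegionTooBusyException once the memstore reaches
    // flush.size * block.multiplier (the multiplier defaults to 4).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long limit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
    System.out.println("memstore blocking limit = " + limit + " bytes");
  }
}
```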
2024-12-10T15:38:28,392 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:28,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:28,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:28,393 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:28,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845168380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:28,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:28,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845168391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:28,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742413_1589 (size=12301) 2024-12-10T15:38:28,484 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:28,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845168482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:28,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:28,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845168494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:28,498 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:28,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845168495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:28,544 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:28,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-10T15:38:28,544 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-10T15:38:28,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:28,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:28,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:28,545 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:28,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:28,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:28,686 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:28,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845168686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:28,701 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:28,701 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-10T15:38:28,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:28,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:28,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:28,701 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:38:28,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:28,702 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:28,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:28,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845168699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:28,702 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:28,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845168699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:28,821 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=472 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/c0a7cba407534861bff78dde000d0dcf 2024-12-10T15:38:28,833 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/5983e79cc0ec4dd290a3583ba5c32d49 is 50, key is test_row_0/C:col10/1733845107703/Put/seqid=0 2024-12-10T15:38:28,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-10T15:38:28,852 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:28,853 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-10T15:38:28,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:28,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:28,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:28,853 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:28,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:28,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:28,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742414_1590 (size=12301) 2024-12-10T15:38:28,991 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:28,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 255 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43478 deadline: 1733845168989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:29,005 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:29,005 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:29,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43414 deadline: 1733845169003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:29,007 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:29,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43444 deadline: 1733845169005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:29,007 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-10T15:38:29,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:29,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:29,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:29,007 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:38:29,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:29,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:29,160 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:29,161 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-10T15:38:29,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:29,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:29,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:29,161 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:29,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:29,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:38:29,200 DEBUG [Thread-2179 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6dd48863 to 127.0.0.1:56346 2024-12-10T15:38:29,200 DEBUG [Thread-2179 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:38:29,202 DEBUG [Thread-2181 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x51196534 to 127.0.0.1:56346 2024-12-10T15:38:29,202 DEBUG [Thread-2181 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:38:29,202 DEBUG [Thread-2185 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3e96b8ad to 127.0.0.1:56346 2024-12-10T15:38:29,203 DEBUG [Thread-2185 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:38:29,205 DEBUG [Thread-2187 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17e5a47d to 127.0.0.1:56346 2024-12-10T15:38:29,205 DEBUG [Thread-2187 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:38:29,212 DEBUG [Thread-2183 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1dc5e114 to 127.0.0.1:56346 2024-12-10T15:38:29,212 DEBUG [Thread-2183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:38:29,271 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=472 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/5983e79cc0ec4dd290a3583ba5c32d49 2024-12-10T15:38:29,276 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/558eedc8b86649a7b85dfd66e3cb10f3 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/558eedc8b86649a7b85dfd66e3cb10f3 2024-12-10T15:38:29,278 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/558eedc8b86649a7b85dfd66e3cb10f3, entries=200, sequenceid=472, filesize=14.4 K 2024-12-10T15:38:29,278 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/c0a7cba407534861bff78dde000d0dcf as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/c0a7cba407534861bff78dde000d0dcf 2024-12-10T15:38:29,280 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/c0a7cba407534861bff78dde000d0dcf, entries=150, sequenceid=472, filesize=12.0 K 2024-12-10T15:38:29,281 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/5983e79cc0ec4dd290a3583ba5c32d49 as 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/5983e79cc0ec4dd290a3583ba5c32d49 2024-12-10T15:38:29,283 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/5983e79cc0ec4dd290a3583ba5c32d49, entries=150, sequenceid=472, filesize=12.0 K 2024-12-10T15:38:29,283 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 7b74038778882593ac40a176deaf1ba7 in 956ms, sequenceid=472, compaction requested=false 2024-12-10T15:38:29,283 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:29,317 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:29,318 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-10T15:38:29,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:29,318 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing 7b74038778882593ac40a176deaf1ba7 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-10T15:38:29,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=A 2024-12-10T15:38:29,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:29,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=B 2024-12-10T15:38:29,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:29,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=C 2024-12-10T15:38:29,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:29,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/09468b5727994ce8be7445999fe3f8e7 is 50, key is test_row_0/A:col10/1733845108359/Put/seqid=0 2024-12-10T15:38:29,324 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742415_1591 (size=12301) 2024-12-10T15:38:29,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-10T15:38:29,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:29,499 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. as already flushing 2024-12-10T15:38:29,499 DEBUG [Thread-2169 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7daa5922 to 127.0.0.1:56346 2024-12-10T15:38:29,499 DEBUG [Thread-2169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:38:29,510 DEBUG [Thread-2174 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5f7c40ba to 127.0.0.1:56346 2024-12-10T15:38:29,510 DEBUG [Thread-2176 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x41b0e7b6 to 127.0.0.1:56346 2024-12-10T15:38:29,510 DEBUG [Thread-2174 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:38:29,510 DEBUG [Thread-2176 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:38:29,581 DEBUG [Thread-2167 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5871c039 to 127.0.0.1:56346 2024-12-10T15:38:29,581 DEBUG [Thread-2167 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:38:29,725 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=489 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/09468b5727994ce8be7445999fe3f8e7 2024-12-10T15:38:29,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/c5886da1c5b344d58f96aafe2bf209ab is 50, key is test_row_0/B:col10/1733845108359/Put/seqid=0 2024-12-10T15:38:29,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742416_1592 (size=12301) 2024-12-10T15:38:30,137 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=489 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/c5886da1c5b344d58f96aafe2bf209ab 2024-12-10T15:38:30,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/5fd6b72592b14bb89020a562159ea302 is 50, key is test_row_0/C:col10/1733845108359/Put/seqid=0 2024-12-10T15:38:30,145 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742417_1593 (size=12301) 2024-12-10T15:38:30,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-10T15:38:30,545 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=489 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/5fd6b72592b14bb89020a562159ea302 2024-12-10T15:38:30,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/09468b5727994ce8be7445999fe3f8e7 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/09468b5727994ce8be7445999fe3f8e7 2024-12-10T15:38:30,550 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/09468b5727994ce8be7445999fe3f8e7, entries=150, sequenceid=489, filesize=12.0 K 2024-12-10T15:38:30,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/c5886da1c5b344d58f96aafe2bf209ab as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/c5886da1c5b344d58f96aafe2bf209ab 2024-12-10T15:38:30,552 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/c5886da1c5b344d58f96aafe2bf209ab, entries=150, sequenceid=489, filesize=12.0 K 2024-12-10T15:38:30,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/5fd6b72592b14bb89020a562159ea302 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/5fd6b72592b14bb89020a562159ea302 2024-12-10T15:38:30,555 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/5fd6b72592b14bb89020a562159ea302, entries=150, sequenceid=489, filesize=12.0 K 
2024-12-10T15:38:30,555 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=26.84 KB/27480 for 7b74038778882593ac40a176deaf1ba7 in 1237ms, sequenceid=489, compaction requested=true 2024-12-10T15:38:30,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:30,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:30,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-12-10T15:38:30,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-12-10T15:38:30,558 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-12-10T15:38:30,558 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3170 sec 2024-12-10T15:38:30,559 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 2.3220 sec 2024-12-10T15:38:32,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-10T15:38:32,347 INFO [Thread-2178 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-12-10T15:38:37,902 DEBUG [Thread-2172 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b7f20c4 to 127.0.0.1:56346 2024-12-10T15:38:37,902 DEBUG [Thread-2172 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:38:37,902 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-10T15:38:37,902 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 10 2024-12-10T15:38:37,902 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 134 2024-12-10T15:38:37,902 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 22 2024-12-10T15:38:37,902 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 103 2024-12-10T15:38:37,902 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 119 2024-12-10T15:38:37,902 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-10T15:38:37,902 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-10T15:38:37,902 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2033 2024-12-10T15:38:37,902 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6099 rows 2024-12-10T15:38:37,902 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2032 2024-12-10T15:38:37,902 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6096 rows 2024-12-10T15:38:37,902 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2045 2024-12-10T15:38:37,902 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6135 rows 2024-12-10T15:38:37,902 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2046 2024-12-10T15:38:37,902 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6138 rows 2024-12-10T15:38:37,902 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2029 2024-12-10T15:38:37,902 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6087 rows 2024-12-10T15:38:37,902 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-10T15:38:37,902 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7d0ab200 to 127.0.0.1:56346 2024-12-10T15:38:37,902 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:38:37,904 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-10T15:38:37,913 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-10T15:38:37,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-10T15:38:37,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-10T15:38:37,937 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733845117937"}]},"ts":"1733845117937"} 2024-12-10T15:38:37,958 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-10T15:38:37,981 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-10T15:38:37,984 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-10T15:38:37,987 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b74038778882593ac40a176deaf1ba7, UNASSIGN}] 2024-12-10T15:38:37,988 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b74038778882593ac40a176deaf1ba7, UNASSIGN 2024-12-10T15:38:37,989 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=144 updating hbase:meta row=7b74038778882593ac40a176deaf1ba7, regionState=CLOSING, regionLocation=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:37,992 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T15:38:37,992 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; CloseRegionProcedure 7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049}] 2024-12-10T15:38:38,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-10T15:38:38,145 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:38,146 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] handler.UnassignRegionHandler(124): Close 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:38,146 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T15:38:38,146 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegion(1681): Closing 7b74038778882593ac40a176deaf1ba7, disabling compactions & flushes 2024-12-10T15:38:38,146 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:38,146 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 2024-12-10T15:38:38,146 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. after waiting 0 ms 2024-12-10T15:38:38,146 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
2024-12-10T15:38:38,146 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegion(2837): Flushing 7b74038778882593ac40a176deaf1ba7 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-10T15:38:38,146 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=A 2024-12-10T15:38:38,146 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:38,146 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=B 2024-12-10T15:38:38,146 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:38,146 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7b74038778882593ac40a176deaf1ba7, store=C 2024-12-10T15:38:38,146 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:38,149 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/1cce5c5f4a7742a9970662cd3ac8e497 is 50, key is test_row_0/A:col10/1733845109581/Put/seqid=0 2024-12-10T15:38:38,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742418_1594 (size=9857) 2024-12-10T15:38:38,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-10T15:38:38,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-10T15:38:38,555 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=497 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/1cce5c5f4a7742a9970662cd3ac8e497 2024-12-10T15:38:38,567 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/3461182e61844504b035bd807d544cd1 is 50, key is test_row_0/B:col10/1733845109581/Put/seqid=0 2024-12-10T15:38:38,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742419_1595 (size=9857) 2024-12-10T15:38:38,582 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 
{event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=497 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/3461182e61844504b035bd807d544cd1 2024-12-10T15:38:38,592 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/8dfb2cd8fbba4d1a98b36396dbfa7ff4 is 50, key is test_row_0/C:col10/1733845109581/Put/seqid=0 2024-12-10T15:38:38,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742420_1596 (size=9857) 2024-12-10T15:38:39,001 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=497 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/8dfb2cd8fbba4d1a98b36396dbfa7ff4 2024-12-10T15:38:39,004 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/A/1cce5c5f4a7742a9970662cd3ac8e497 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/1cce5c5f4a7742a9970662cd3ac8e497 2024-12-10T15:38:39,006 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/1cce5c5f4a7742a9970662cd3ac8e497, entries=100, sequenceid=497, filesize=9.6 K 2024-12-10T15:38:39,007 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/B/3461182e61844504b035bd807d544cd1 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/3461182e61844504b035bd807d544cd1 2024-12-10T15:38:39,009 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/3461182e61844504b035bd807d544cd1, entries=100, sequenceid=497, filesize=9.6 K 2024-12-10T15:38:39,009 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/.tmp/C/8dfb2cd8fbba4d1a98b36396dbfa7ff4 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/8dfb2cd8fbba4d1a98b36396dbfa7ff4 2024-12-10T15:38:39,011 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/8dfb2cd8fbba4d1a98b36396dbfa7ff4, entries=100, sequenceid=497, filesize=9.6 K 2024-12-10T15:38:39,011 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 7b74038778882593ac40a176deaf1ba7 in 865ms, sequenceid=497, compaction requested=true 2024-12-10T15:38:39,012 DEBUG [StoreCloser-TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/98a7f466bb6b4c999d21f45091fc2139, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/27a59da5167f404b9bbcbf4490feeac7, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/e8404abff83b40fda8d3032a89f0cb42, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/2dc17561605f4de0be429157e53189e3, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/d8f5849447824e93b6419d600f3df6d1, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/c08b7f6fa7c34827a3ae34c4c7bbe3d9, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/dbc8f70405674514a08dcc2352a33a85, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/3559e476e2184ac782051a0b694eeb73, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/9ff81071c00c4a1eae400cf43b2593db, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/96a02f1f7747426990050974dde06467, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/c3fec18d5f7345628b9d503d91569895, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/ddcc1bdd1e294a408aec11d10f85200d, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/91a8b4b7d1ed4d468f993c2f8c3288f0, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/e62af4730961438eaca3d2d7b2ebc0c4, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/19b903afb72d48b291088d9984f25bbf, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/ddd9e624a0614ecbbe0e2d387f73712f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/f526f0880fcf4f3ea1cf2413ec14998d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/984c66a4508e4e2dab488853269612cd, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/df595d7399e24735bfdb30e279657137, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/e0f940a0db5143db8723636a1884f44d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/a0a40773f39b4ba18c81b74cc6ccf0bc, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/da6adaf4efa146c38f22509cf16eb5d8, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/05b6e822bc74474396a25b6c47e44f2f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/77349b78d6654003b7ad619789767d64, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/b5634853aa654ef4b7e18fed26e6acd1, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/fc7a5ffff255420aa24eb241787847a2, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/cddb0e0b2edb4970ba33b91f9f8d5e0b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/4b2ccfee14104fd9bca6a3378f02138c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/a2276425178d485d961d289eb2cedb02, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/9b57b35979be4bac9faf859e409eef0a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/a72266238afa4cdb8b6ee0d1f978796b, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/0ae1014a9f5748c7b88fd1cba12f6ff8] to archive 2024-12-10T15:38:39,013 DEBUG [StoreCloser-TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-10T15:38:39,014 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/27a59da5167f404b9bbcbf4490feeac7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/27a59da5167f404b9bbcbf4490feeac7 2024-12-10T15:38:39,014 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/e8404abff83b40fda8d3032a89f0cb42 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/e8404abff83b40fda8d3032a89f0cb42 2024-12-10T15:38:39,014 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/98a7f466bb6b4c999d21f45091fc2139 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/98a7f466bb6b4c999d21f45091fc2139 2024-12-10T15:38:39,015 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/c08b7f6fa7c34827a3ae34c4c7bbe3d9 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/c08b7f6fa7c34827a3ae34c4c7bbe3d9 2024-12-10T15:38:39,015 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/3559e476e2184ac782051a0b694eeb73 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/3559e476e2184ac782051a0b694eeb73 2024-12-10T15:38:39,015 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/2dc17561605f4de0be429157e53189e3 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/2dc17561605f4de0be429157e53189e3 2024-12-10T15:38:39,015 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/d8f5849447824e93b6419d600f3df6d1 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/d8f5849447824e93b6419d600f3df6d1 2024-12-10T15:38:39,016 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/dbc8f70405674514a08dcc2352a33a85 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/dbc8f70405674514a08dcc2352a33a85 2024-12-10T15:38:39,016 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/c3fec18d5f7345628b9d503d91569895 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/c3fec18d5f7345628b9d503d91569895 2024-12-10T15:38:39,016 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/96a02f1f7747426990050974dde06467 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/96a02f1f7747426990050974dde06467 2024-12-10T15:38:39,016 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/9ff81071c00c4a1eae400cf43b2593db to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/9ff81071c00c4a1eae400cf43b2593db 2024-12-10T15:38:39,016 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/ddcc1bdd1e294a408aec11d10f85200d to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/ddcc1bdd1e294a408aec11d10f85200d 2024-12-10T15:38:39,017 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/91a8b4b7d1ed4d468f993c2f8c3288f0 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/91a8b4b7d1ed4d468f993c2f8c3288f0 2024-12-10T15:38:39,017 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/19b903afb72d48b291088d9984f25bbf to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/19b903afb72d48b291088d9984f25bbf 2024-12-10T15:38:39,017 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/e62af4730961438eaca3d2d7b2ebc0c4 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/e62af4730961438eaca3d2d7b2ebc0c4 2024-12-10T15:38:39,018 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/df595d7399e24735bfdb30e279657137 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/df595d7399e24735bfdb30e279657137 2024-12-10T15:38:39,018 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/984c66a4508e4e2dab488853269612cd to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/984c66a4508e4e2dab488853269612cd 2024-12-10T15:38:39,018 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/f526f0880fcf4f3ea1cf2413ec14998d to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/f526f0880fcf4f3ea1cf2413ec14998d 2024-12-10T15:38:39,018 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/ddd9e624a0614ecbbe0e2d387f73712f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/ddd9e624a0614ecbbe0e2d387f73712f 2024-12-10T15:38:39,018 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/a0a40773f39b4ba18c81b74cc6ccf0bc to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/a0a40773f39b4ba18c81b74cc6ccf0bc 2024-12-10T15:38:39,018 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/e0f940a0db5143db8723636a1884f44d to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/e0f940a0db5143db8723636a1884f44d 2024-12-10T15:38:39,018 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/da6adaf4efa146c38f22509cf16eb5d8 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/da6adaf4efa146c38f22509cf16eb5d8 2024-12-10T15:38:39,019 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/05b6e822bc74474396a25b6c47e44f2f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/05b6e822bc74474396a25b6c47e44f2f 2024-12-10T15:38:39,023 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/77349b78d6654003b7ad619789767d64 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/77349b78d6654003b7ad619789767d64 2024-12-10T15:38:39,024 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/4b2ccfee14104fd9bca6a3378f02138c to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/4b2ccfee14104fd9bca6a3378f02138c 2024-12-10T15:38:39,025 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/a72266238afa4cdb8b6ee0d1f978796b to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/a72266238afa4cdb8b6ee0d1f978796b 2024-12-10T15:38:39,025 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/b5634853aa654ef4b7e18fed26e6acd1 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/b5634853aa654ef4b7e18fed26e6acd1 2024-12-10T15:38:39,025 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/a2276425178d485d961d289eb2cedb02 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/a2276425178d485d961d289eb2cedb02 2024-12-10T15:38:39,026 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/0ae1014a9f5748c7b88fd1cba12f6ff8 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/0ae1014a9f5748c7b88fd1cba12f6ff8 2024-12-10T15:38:39,027 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/9b57b35979be4bac9faf859e409eef0a to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/9b57b35979be4bac9faf859e409eef0a 2024-12-10T15:38:39,031 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/fc7a5ffff255420aa24eb241787847a2 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/fc7a5ffff255420aa24eb241787847a2 2024-12-10T15:38:39,032 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/cddb0e0b2edb4970ba33b91f9f8d5e0b to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/cddb0e0b2edb4970ba33b91f9f8d5e0b 2024-12-10T15:38:39,033 DEBUG [StoreCloser-TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/7fc1659739c241d299ee71d2487de1ee, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/bb7a1069ceeb4d47978b0725604a3f67, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/0bff59617a7b4ee8bda9f0cdd5fc9e83, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/98fe875f6f5345469c0e408022681d1d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/fa8b09f47ff744769083fdacbef847c9, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/daec679b0dcf4eb28b3ac47bf480ad8f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/7b9c950e34e04d5fa5bb88976a5b634d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/47288bb1b8d4402681694c0c313d1837, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/e77fa13d28d94f32a5dfc6c035207e26, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/df2f603f03854079bb72231ee870be3c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/37f4cf8abd68483c8851bb0de67366df, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/cd3e8cb1848c4318bed6ed703f485c68, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/007d3359c99a4e378fc6257964cb0387, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/8c136889fd804329b64afdbc569b0d4c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/f5feafa4e2b047a4b27e914fd119ca3e, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/8e2779d4ee394107ac6361073a4069a6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/feee833bc37345ada56674a800b0d40c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/0411940ba7fa4125acbd209e92a33ef5, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/9e90ab8379964adaab4d5cad2e18f14b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/e540db12406f4dae9ce05b0e187a38e5, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/927dac75d4d0465fabbe703401720b83, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/4e1ff22e84984a398149ee28d77cd619, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/267db63be3ba4e27bc66a7e7d693ff64, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/c5c5f28945c24e1fb3804826458e0cc4, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/76ded84aaf3f47bdb313478505ed80ea, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/28456c23727c480095ee21a0da981a83, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/f913faced63844a29c12162cf5a86d15, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/2d9631305bf241fb858448907f7a3c99, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/7aa7b6969cf14a068b0137459322b1c0, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/0f47382c955143f5b3185817e5442546, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/891527e911b245ffb5018cb51a6fb6e1, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/f831b69473ae47d9902a74adeaeb7c08] to archive 2024-12-10T15:38:39,034 DEBUG [StoreCloser-TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-10T15:38:39,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-10T15:38:39,036 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/7b9c950e34e04d5fa5bb88976a5b634d to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/7b9c950e34e04d5fa5bb88976a5b634d 2024-12-10T15:38:39,036 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/98fe875f6f5345469c0e408022681d1d to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/98fe875f6f5345469c0e408022681d1d 2024-12-10T15:38:39,037 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/0bff59617a7b4ee8bda9f0cdd5fc9e83 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/0bff59617a7b4ee8bda9f0cdd5fc9e83 2024-12-10T15:38:39,037 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/47288bb1b8d4402681694c0c313d1837 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/47288bb1b8d4402681694c0c313d1837 2024-12-10T15:38:39,037 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/daec679b0dcf4eb28b3ac47bf480ad8f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/daec679b0dcf4eb28b3ac47bf480ad8f 2024-12-10T15:38:39,037 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/bb7a1069ceeb4d47978b0725604a3f67 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/bb7a1069ceeb4d47978b0725604a3f67 2024-12-10T15:38:39,037 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/fa8b09f47ff744769083fdacbef847c9 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/fa8b09f47ff744769083fdacbef847c9 2024-12-10T15:38:39,037 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/7fc1659739c241d299ee71d2487de1ee to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/7fc1659739c241d299ee71d2487de1ee 2024-12-10T15:38:39,038 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/df2f603f03854079bb72231ee870be3c to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/df2f603f03854079bb72231ee870be3c 2024-12-10T15:38:39,038 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/37f4cf8abd68483c8851bb0de67366df to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/37f4cf8abd68483c8851bb0de67366df 2024-12-10T15:38:39,039 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/007d3359c99a4e378fc6257964cb0387 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/007d3359c99a4e378fc6257964cb0387 2024-12-10T15:38:39,039 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/e77fa13d28d94f32a5dfc6c035207e26 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/e77fa13d28d94f32a5dfc6c035207e26 2024-12-10T15:38:39,039 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/cd3e8cb1848c4318bed6ed703f485c68 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/cd3e8cb1848c4318bed6ed703f485c68 2024-12-10T15:38:39,040 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/8c136889fd804329b64afdbc569b0d4c to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/8c136889fd804329b64afdbc569b0d4c 2024-12-10T15:38:39,041 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/8e2779d4ee394107ac6361073a4069a6 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/8e2779d4ee394107ac6361073a4069a6 2024-12-10T15:38:39,042 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/feee833bc37345ada56674a800b0d40c to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/feee833bc37345ada56674a800b0d40c 2024-12-10T15:38:39,042 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/9e90ab8379964adaab4d5cad2e18f14b to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/9e90ab8379964adaab4d5cad2e18f14b 2024-12-10T15:38:39,042 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/e540db12406f4dae9ce05b0e187a38e5 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/e540db12406f4dae9ce05b0e187a38e5 2024-12-10T15:38:39,043 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/927dac75d4d0465fabbe703401720b83 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/927dac75d4d0465fabbe703401720b83 2024-12-10T15:38:39,043 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/4e1ff22e84984a398149ee28d77cd619 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/4e1ff22e84984a398149ee28d77cd619 2024-12-10T15:38:39,043 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/76ded84aaf3f47bdb313478505ed80ea to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/76ded84aaf3f47bdb313478505ed80ea 2024-12-10T15:38:39,044 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/0411940ba7fa4125acbd209e92a33ef5 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/0411940ba7fa4125acbd209e92a33ef5 2024-12-10T15:38:39,045 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/7aa7b6969cf14a068b0137459322b1c0 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/7aa7b6969cf14a068b0137459322b1c0 2024-12-10T15:38:39,045 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/2d9631305bf241fb858448907f7a3c99 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/2d9631305bf241fb858448907f7a3c99 2024-12-10T15:38:39,045 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/c5c5f28945c24e1fb3804826458e0cc4 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/c5c5f28945c24e1fb3804826458e0cc4 2024-12-10T15:38:39,047 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/f913faced63844a29c12162cf5a86d15 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/f913faced63844a29c12162cf5a86d15 2024-12-10T15:38:39,047 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/28456c23727c480095ee21a0da981a83 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/28456c23727c480095ee21a0da981a83 2024-12-10T15:38:39,048 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/f831b69473ae47d9902a74adeaeb7c08 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/f831b69473ae47d9902a74adeaeb7c08 2024-12-10T15:38:39,048 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/0f47382c955143f5b3185817e5442546 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/0f47382c955143f5b3185817e5442546 2024-12-10T15:38:39,048 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/891527e911b245ffb5018cb51a6fb6e1 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/891527e911b245ffb5018cb51a6fb6e1 2024-12-10T15:38:39,048 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/267db63be3ba4e27bc66a7e7d693ff64 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/267db63be3ba4e27bc66a7e7d693ff64 2024-12-10T15:38:39,048 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/f5feafa4e2b047a4b27e914fd119ca3e to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/f5feafa4e2b047a4b27e914fd119ca3e 2024-12-10T15:38:39,050 DEBUG [StoreCloser-TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/51249cb9ab7c404890723366d2e50495, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/4acf0503856b40618d019f4042f76d23, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/f30d0e58270d4b0f89f2b4405c6811b8, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/d4f609e7d36043e5ab7e663596a0be43, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/7f46083540d24255ae48587c9c2dedfa, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/ebf50abddb664603bcaea11adbceb425, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/fd51544e17ba44bc95688200e4e55c82, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/8b870e21e3cd4e4fb404d0e2ba1bd10e, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/7e6f4626a61f4d16858263092208504f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/4c9cf326bc9d43a59a93f0af07976944, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/1f9b102138f94045af0d146d2058ddce, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/223f9d79868c4449aadbba2f204a6a3a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/4337be7a8e2c4c2594e9f9eb6d74e910, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/9788429a77e14996993ee70176b2cb90, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/a9e31e49551d4871a4428756327cfef2, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/f5ea53c863124e8085dbd1174739007a, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/6576301b304d4ed4ab5485df32988590, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/020ed929ce3841a18fbc52928f0c2dad, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/6decb32a68104a6590b17cd9ae6c7e13, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/fd08018c9cf5426bbe8d4c820dfaaf07, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/5fb119b6ae374ee887c3e20c73982aa8, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/7fce6a196916498d866768e75fcbdb04, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/5889091016bb457588f0fd0ad3a4c8d9, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/500b0eff31e9443dae597af19b226ade, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/4167c7deccb549e3a139838873b192a2, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/c7416b35a6b34131aec9efb32ff9c17e, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/efacf21384984c868c794ee4c2cbe72a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/7cae231e6e45488b8570062183a4835f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/b80cf02888534571b3d9dd2cca531c2e, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/0d9e4119fced490189b1c714a50dc849, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/8d940558096c41c89a8f8813828d3b88, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/2bb8140e33294b70b3f70df8d9125760] to archive 2024-12-10T15:38:39,051 DEBUG [StoreCloser-TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
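Editor's note on the StoreCloser/HFileArchiver entries above and below: compacted store files are not deleted on region close; each one is moved from the region's data directory to a mirrored location under archive/. A minimal, hypothetical Java sketch of that path mapping (class and method names are illustrative only, not HBase API):

public final class ArchivePathSketch {
    // Hypothetical helper: mirrors the data/ -> archive/data/ move shown by the
    // HFileArchiver "Archived from FileableStoreFile, <src> to <dst>" entries.
    static String toArchivePath(String storeFilePath) {
        int idx = storeFilePath.indexOf("/data/");
        if (idx < 0) {
            throw new IllegalArgumentException("unexpected layout: " + storeFilePath);
        }
        return storeFilePath.substring(0, idx) + "/archive" + storeFilePath.substring(idx);
    }

    public static void main(String[] args) {
        String src = "hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935"
            + "/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/51249cb9ab7c404890723366d2e50495";
        // Prints the matching .../archive/data/default/TestAcidGuarantees/... destination.
        System.out.println(toArchivePath(src));
    }
}

Every from/to pair in these log entries follows this same data/ -> archive/data/ pattern.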
2024-12-10T15:38:39,053 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/f30d0e58270d4b0f89f2b4405c6811b8 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/f30d0e58270d4b0f89f2b4405c6811b8 2024-12-10T15:38:39,053 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/51249cb9ab7c404890723366d2e50495 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/51249cb9ab7c404890723366d2e50495 2024-12-10T15:38:39,053 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/4acf0503856b40618d019f4042f76d23 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/4acf0503856b40618d019f4042f76d23 2024-12-10T15:38:39,054 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/d4f609e7d36043e5ab7e663596a0be43 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/d4f609e7d36043e5ab7e663596a0be43 2024-12-10T15:38:39,054 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/ebf50abddb664603bcaea11adbceb425 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/ebf50abddb664603bcaea11adbceb425 2024-12-10T15:38:39,054 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/7f46083540d24255ae48587c9c2dedfa to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/7f46083540d24255ae48587c9c2dedfa 2024-12-10T15:38:39,054 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/fd51544e17ba44bc95688200e4e55c82 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/fd51544e17ba44bc95688200e4e55c82 2024-12-10T15:38:39,055 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/1f9b102138f94045af0d146d2058ddce to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/1f9b102138f94045af0d146d2058ddce 2024-12-10T15:38:39,055 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/4c9cf326bc9d43a59a93f0af07976944 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/4c9cf326bc9d43a59a93f0af07976944 2024-12-10T15:38:39,055 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/7e6f4626a61f4d16858263092208504f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/7e6f4626a61f4d16858263092208504f 2024-12-10T15:38:39,056 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/4337be7a8e2c4c2594e9f9eb6d74e910 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/4337be7a8e2c4c2594e9f9eb6d74e910 2024-12-10T15:38:39,056 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/223f9d79868c4449aadbba2f204a6a3a to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/223f9d79868c4449aadbba2f204a6a3a 2024-12-10T15:38:39,056 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/9788429a77e14996993ee70176b2cb90 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/9788429a77e14996993ee70176b2cb90 2024-12-10T15:38:39,056 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/8b870e21e3cd4e4fb404d0e2ba1bd10e to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/8b870e21e3cd4e4fb404d0e2ba1bd10e 2024-12-10T15:38:39,057 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/a9e31e49551d4871a4428756327cfef2 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/a9e31e49551d4871a4428756327cfef2 2024-12-10T15:38:39,057 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/6576301b304d4ed4ab5485df32988590 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/6576301b304d4ed4ab5485df32988590 2024-12-10T15:38:39,058 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/020ed929ce3841a18fbc52928f0c2dad to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/020ed929ce3841a18fbc52928f0c2dad 2024-12-10T15:38:39,058 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/f5ea53c863124e8085dbd1174739007a to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/f5ea53c863124e8085dbd1174739007a 2024-12-10T15:38:39,059 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/6decb32a68104a6590b17cd9ae6c7e13 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/6decb32a68104a6590b17cd9ae6c7e13 2024-12-10T15:38:39,059 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/5fb119b6ae374ee887c3e20c73982aa8 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/5fb119b6ae374ee887c3e20c73982aa8 2024-12-10T15:38:39,059 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/fd08018c9cf5426bbe8d4c820dfaaf07 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/fd08018c9cf5426bbe8d4c820dfaaf07 2024-12-10T15:38:39,059 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/7fce6a196916498d866768e75fcbdb04 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/7fce6a196916498d866768e75fcbdb04 2024-12-10T15:38:39,059 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/500b0eff31e9443dae597af19b226ade to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/500b0eff31e9443dae597af19b226ade 2024-12-10T15:38:39,059 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/5889091016bb457588f0fd0ad3a4c8d9 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/5889091016bb457588f0fd0ad3a4c8d9 2024-12-10T15:38:39,060 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/4167c7deccb549e3a139838873b192a2 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/4167c7deccb549e3a139838873b192a2 2024-12-10T15:38:39,060 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/b80cf02888534571b3d9dd2cca531c2e to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/b80cf02888534571b3d9dd2cca531c2e 2024-12-10T15:38:39,060 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/efacf21384984c868c794ee4c2cbe72a to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/efacf21384984c868c794ee4c2cbe72a 2024-12-10T15:38:39,060 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/7cae231e6e45488b8570062183a4835f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/7cae231e6e45488b8570062183a4835f 2024-12-10T15:38:39,060 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/c7416b35a6b34131aec9efb32ff9c17e to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/c7416b35a6b34131aec9efb32ff9c17e 2024-12-10T15:38:39,061 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/8d940558096c41c89a8f8813828d3b88 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/8d940558096c41c89a8f8813828d3b88 2024-12-10T15:38:39,061 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/0d9e4119fced490189b1c714a50dc849 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/0d9e4119fced490189b1c714a50dc849 2024-12-10T15:38:39,061 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/2bb8140e33294b70b3f70df8d9125760 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/2bb8140e33294b70b3f70df8d9125760 2024-12-10T15:38:39,065 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/recovered.edits/500.seqid, newMaxSeqId=500, maxSeqId=1 2024-12-10T15:38:39,066 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7. 
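Editor's note on the WALSplitUtil entry just above: as the region closes, a 500.seqid marker is written under recovered.edits with newMaxSeqId=500. The pairing of the reported sequence id and the file name suggests the marker's name itself encodes the region's max sequence id; a tiny illustrative sketch (hypothetical helper, not HBase code) recovering the number from such a name:

public final class SeqIdMarkerSketch {
    // Assumption (supported by the entry above): the marker is named "<maxSeqId>.seqid".
    static long parseSeqId(String markerFileName) {
        if (!markerFileName.endsWith(".seqid")) {
            throw new IllegalArgumentException("not a .seqid marker: " + markerFileName);
        }
        return Long.parseLong(markerFileName.substring(0, markerFileName.length() - ".seqid".length()));
    }

    public static void main(String[] args) {
        System.out.println(parseSeqId("500.seqid")); // 500, matching newMaxSeqId=500 above
    }
}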
2024-12-10T15:38:39,066 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] regionserver.HRegion(1635): Region close journal for 7b74038778882593ac40a176deaf1ba7: 2024-12-10T15:38:39,067 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=145}] handler.UnassignRegionHandler(170): Closed 7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:39,067 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=144 updating hbase:meta row=7b74038778882593ac40a176deaf1ba7, regionState=CLOSED 2024-12-10T15:38:39,069 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-12-10T15:38:39,069 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; CloseRegionProcedure 7b74038778882593ac40a176deaf1ba7, server=bf0fec90ff6d,46239,1733844953049 in 1.0760 sec 2024-12-10T15:38:39,070 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=144, resume processing ppid=143 2024-12-10T15:38:39,070 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, ppid=143, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7b74038778882593ac40a176deaf1ba7, UNASSIGN in 1.0820 sec 2024-12-10T15:38:39,071 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-12-10T15:38:39,071 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.0860 sec 2024-12-10T15:38:39,072 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733845119072"}]},"ts":"1733845119072"} 2024-12-10T15:38:39,073 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-10T15:38:39,083 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-10T15:38:39,085 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.1710 sec 2024-12-10T15:38:40,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-10T15:38:40,035 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-12-10T15:38:40,035 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-10T15:38:40,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=146, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:38:40,037 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=146, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:38:40,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-10T15:38:40,037 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=146, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:38:40,038 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:40,040 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A, FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B, FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C, FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/recovered.edits] 2024-12-10T15:38:40,044 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/09468b5727994ce8be7445999fe3f8e7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/09468b5727994ce8be7445999fe3f8e7 2024-12-10T15:38:40,044 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/1cce5c5f4a7742a9970662cd3ac8e497 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/1cce5c5f4a7742a9970662cd3ac8e497 2024-12-10T15:38:40,044 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/fd89ca87c40149eeb3493051255c94b4 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/fd89ca87c40149eeb3493051255c94b4 2024-12-10T15:38:40,052 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/558eedc8b86649a7b85dfd66e3cb10f3 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/A/558eedc8b86649a7b85dfd66e3cb10f3 2024-12-10T15:38:40,055 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/3461182e61844504b035bd807d544cd1 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/3461182e61844504b035bd807d544cd1 
2024-12-10T15:38:40,055 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/c0a7cba407534861bff78dde000d0dcf to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/c0a7cba407534861bff78dde000d0dcf 2024-12-10T15:38:40,055 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/ab6ccee37612491c88b5ba0872743420 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/ab6ccee37612491c88b5ba0872743420 2024-12-10T15:38:40,055 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/c5886da1c5b344d58f96aafe2bf209ab to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/B/c5886da1c5b344d58f96aafe2bf209ab 2024-12-10T15:38:40,057 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/5983e79cc0ec4dd290a3583ba5c32d49 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/5983e79cc0ec4dd290a3583ba5c32d49 2024-12-10T15:38:40,057 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/bf48975b07964e3aaf63424fd3c3b7dc to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/bf48975b07964e3aaf63424fd3c3b7dc 2024-12-10T15:38:40,057 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/5fd6b72592b14bb89020a562159ea302 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/5fd6b72592b14bb89020a562159ea302 2024-12-10T15:38:40,057 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/8dfb2cd8fbba4d1a98b36396dbfa7ff4 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/C/8dfb2cd8fbba4d1a98b36396dbfa7ff4 2024-12-10T15:38:40,059 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/recovered.edits/500.seqid to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7/recovered.edits/500.seqid 2024-12-10T15:38:40,059 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/7b74038778882593ac40a176deaf1ba7 2024-12-10T15:38:40,059 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-10T15:38:40,061 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=146, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:38:40,062 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-10T15:38:40,063 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-10T15:38:40,064 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=146, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:38:40,064 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-10T15:38:40,064 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733845120064"}]},"ts":"9223372036854775807"} 2024-12-10T15:38:40,065 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-10T15:38:40,065 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 7b74038778882593ac40a176deaf1ba7, NAME => 'TestAcidGuarantees,,1733845085440.7b74038778882593ac40a176deaf1ba7.', STARTKEY => '', ENDKEY => ''}] 2024-12-10T15:38:40,065 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 
2024-12-10T15:38:40,066 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733845120065"}]},"ts":"9223372036854775807"} 2024-12-10T15:38:40,066 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-10T15:38:40,102 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=146, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:38:40,103 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 67 msec 2024-12-10T15:38:40,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-10T15:38:40,138 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 146 completed 2024-12-10T15:38:40,148 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=245 (was 243) - Thread LEAK? -, OpenFileDescriptor=446 (was 438) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1193 (was 1224), ProcessCount=13 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=2929 (was 263) - AvailableMemoryMB LEAK? - 2024-12-10T15:38:40,158 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=245, OpenFileDescriptor=446, MaxFileDescriptor=1048576, SystemLoadAverage=1193, ProcessCount=11, AvailableMemoryMB=2927 2024-12-10T15:38:40,159 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
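Editor's note: procedures pid=142 (DISABLE) and pid=146 (DELETE) above reflect the usual two-step client pattern for dropping a table between test runs. A sketch of the standard HBase Admin calls that drive those master-side procedures; this is an assumption about what the test harness does, not its actual code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class DropTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            if (admin.tableExists(table)) {
                admin.disableTable(table); // -> DisableTableProcedure (pid=142 above)
                admin.deleteTable(table);  // -> DeleteTableProcedure (pid=146 above)
            }
        }
    }
}

Both operations run asynchronously on the master, which appears to be why the handler keeps logging "Checking to see if procedure is done pid=..." while the client polls for completion.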
2024-12-10T15:38:40,159 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T15:38:40,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=147, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-10T15:38:40,161 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-10T15:38:40,161 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:40,161 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 147 2024-12-10T15:38:40,162 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-10T15:38:40,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-10T15:38:40,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742421_1597 (size=963) 2024-12-10T15:38:40,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-10T15:38:40,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-10T15:38:40,576 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935 2024-12-10T15:38:40,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742422_1598 (size=53) 2024-12-10T15:38:40,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-10T15:38:40,987 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T15:38:40,988 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing f84cee47e13bc12ff2ef81f5e007a839, disabling compactions & flushes 2024-12-10T15:38:40,988 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:40,988 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:40,988 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. after waiting 0 ms 2024-12-10T15:38:40,988 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:40,988 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
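Editor's note on the create request logged at 15:38:40,159: it asks for a table-level 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' attribute plus three single-version column families A, B and C (the remaining descriptor attributes shown are defaults). An approximate Java equivalent using the public TableDescriptorBuilder/Admin API; treat it as a sketch of the request, not the test's own code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateAdaptiveTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableDescriptorBuilder table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // Table-level attribute from the logged descriptor.
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
        for (String family : new String[] {"A", "B", "C"}) {
            table.setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1) // VERSIONS => '1' in the log
                .build());
        }
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.createTable(table.build()); // -> CreateTableProcedure (pid=147 above)
        }
    }
}

The effect of the ADAPTIVE setting shows up further down, where each store opens with a CompactingMemStore and compactor=ADAPTIVE.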
2024-12-10T15:38:40,988 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:40,989 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-10T15:38:40,989 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733845120989"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733845120989"}]},"ts":"1733845120989"} 2024-12-10T15:38:40,990 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-10T15:38:40,990 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-10T15:38:40,991 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733845120990"}]},"ts":"1733845120990"} 2024-12-10T15:38:40,992 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-10T15:38:41,015 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=f84cee47e13bc12ff2ef81f5e007a839, ASSIGN}] 2024-12-10T15:38:41,017 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=f84cee47e13bc12ff2ef81f5e007a839, ASSIGN 2024-12-10T15:38:41,018 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=f84cee47e13bc12ff2ef81f5e007a839, ASSIGN; state=OFFLINE, location=bf0fec90ff6d,46239,1733844953049; forceNewPlan=false, retain=false 2024-12-10T15:38:41,168 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=f84cee47e13bc12ff2ef81f5e007a839, regionState=OPENING, regionLocation=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:41,169 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE; OpenRegionProcedure f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049}] 2024-12-10T15:38:41,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-10T15:38:41,321 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:41,323 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:38:41,324 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(7285): Opening region: {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} 2024-12-10T15:38:41,324 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:41,324 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T15:38:41,324 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(7327): checking encryption for f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:41,324 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(7330): checking classloading for f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:41,325 INFO [StoreOpener-f84cee47e13bc12ff2ef81f5e007a839-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:41,326 INFO [StoreOpener-f84cee47e13bc12ff2ef81f5e007a839-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T15:38:41,326 INFO [StoreOpener-f84cee47e13bc12ff2ef81f5e007a839-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f84cee47e13bc12ff2ef81f5e007a839 columnFamilyName A 2024-12-10T15:38:41,326 DEBUG [StoreOpener-f84cee47e13bc12ff2ef81f5e007a839-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:41,326 INFO [StoreOpener-f84cee47e13bc12ff2ef81f5e007a839-1 {}] regionserver.HStore(327): Store=f84cee47e13bc12ff2ef81f5e007a839/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T15:38:41,326 INFO [StoreOpener-f84cee47e13bc12ff2ef81f5e007a839-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:41,327 INFO [StoreOpener-f84cee47e13bc12ff2ef81f5e007a839-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T15:38:41,327 INFO [StoreOpener-f84cee47e13bc12ff2ef81f5e007a839-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f84cee47e13bc12ff2ef81f5e007a839 columnFamilyName B 2024-12-10T15:38:41,327 DEBUG [StoreOpener-f84cee47e13bc12ff2ef81f5e007a839-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:41,327 INFO [StoreOpener-f84cee47e13bc12ff2ef81f5e007a839-1 {}] regionserver.HStore(327): Store=f84cee47e13bc12ff2ef81f5e007a839/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T15:38:41,327 INFO [StoreOpener-f84cee47e13bc12ff2ef81f5e007a839-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:41,328 INFO [StoreOpener-f84cee47e13bc12ff2ef81f5e007a839-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T15:38:41,328 INFO [StoreOpener-f84cee47e13bc12ff2ef81f5e007a839-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f84cee47e13bc12ff2ef81f5e007a839 columnFamilyName C 2024-12-10T15:38:41,328 DEBUG [StoreOpener-f84cee47e13bc12ff2ef81f5e007a839-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:41,328 INFO [StoreOpener-f84cee47e13bc12ff2ef81f5e007a839-1 {}] regionserver.HStore(327): Store=f84cee47e13bc12ff2ef81f5e007a839/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T15:38:41,329 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:41,329 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:41,329 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:41,331 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-10T15:38:41,331 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(1085): writing seq id for f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:41,333 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-10T15:38:41,333 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(1102): Opened f84cee47e13bc12ff2ef81f5e007a839; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67363197, jitterRate=0.003789857029914856}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T15:38:41,334 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegion(1001): Region open journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:41,334 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., pid=149, masterSystemTime=1733845121321 2024-12-10T15:38:41,336 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:41,336 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=149}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:38:41,336 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=f84cee47e13bc12ff2ef81f5e007a839, regionState=OPEN, openSeqNum=2, regionLocation=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:41,338 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-12-10T15:38:41,338 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; OpenRegionProcedure f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 in 168 msec 2024-12-10T15:38:41,339 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=148, resume processing ppid=147 2024-12-10T15:38:41,339 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, ppid=147, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=f84cee47e13bc12ff2ef81f5e007a839, ASSIGN in 323 msec 2024-12-10T15:38:41,339 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-10T15:38:41,340 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733845121339"}]},"ts":"1733845121339"} 2024-12-10T15:38:41,340 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-10T15:38:41,350 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-10T15:38:41,351 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1910 sec 2024-12-10T15:38:42,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-10T15:38:42,267 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 147 completed 2024-12-10T15:38:42,268 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5b914bf4 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@91d72db 2024-12-10T15:38:42,298 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58971172, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:38:42,299 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:38:42,304 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39476, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:38:42,310 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-10T15:38:42,311 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44970, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-10T15:38:42,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-10T15:38:42,313 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-10T15:38:42,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=150, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-10T15:38:42,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742423_1599 (size=999) 2024-12-10T15:38:42,731 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-10T15:38:42,732 INFO [PEWorker-1 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-10T15:38:42,733 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-10T15:38:42,734 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=f84cee47e13bc12ff2ef81f5e007a839, REOPEN/MOVE}] 2024-12-10T15:38:42,735 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=f84cee47e13bc12ff2ef81f5e007a839, REOPEN/MOVE 2024-12-10T15:38:42,735 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=f84cee47e13bc12ff2ef81f5e007a839, regionState=CLOSING, regionLocation=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:42,736 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T15:38:42,736 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; CloseRegionProcedure f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049}] 2024-12-10T15:38:42,887 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:42,887 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] handler.UnassignRegionHandler(124): Close f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:42,887 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T15:38:42,887 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1681): Closing f84cee47e13bc12ff2ef81f5e007a839, disabling compactions & flushes 2024-12-10T15:38:42,887 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:42,888 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:42,888 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. after waiting 0 ms 2024-12-10T15:38:42,888 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:38:42,890 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-10T15:38:42,891 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:42,891 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1635): Region close journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:42,891 WARN [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegionServer(3786): Not adding moved region record: f84cee47e13bc12ff2ef81f5e007a839 to self. 2024-12-10T15:38:42,892 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] handler.UnassignRegionHandler(170): Closed f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:42,892 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=f84cee47e13bc12ff2ef81f5e007a839, regionState=CLOSED 2024-12-10T15:38:42,894 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-12-10T15:38:42,894 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; CloseRegionProcedure f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 in 157 msec 2024-12-10T15:38:42,894 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=f84cee47e13bc12ff2ef81f5e007a839, REOPEN/MOVE; state=CLOSED, location=bf0fec90ff6d,46239,1733844953049; forceNewPlan=false, retain=true 2024-12-10T15:38:43,044 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=f84cee47e13bc12ff2ef81f5e007a839, regionState=OPENING, regionLocation=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:43,045 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=154, ppid=152, state=RUNNABLE; OpenRegionProcedure f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049}] 2024-12-10T15:38:43,197 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:43,199 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:38:43,199 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7285): Opening region: {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} 2024-12-10T15:38:43,199 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:43,199 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-10T15:38:43,199 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7327): checking encryption for f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:43,199 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7330): checking classloading for f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:43,200 INFO [StoreOpener-f84cee47e13bc12ff2ef81f5e007a839-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:43,201 INFO [StoreOpener-f84cee47e13bc12ff2ef81f5e007a839-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T15:38:43,201 INFO [StoreOpener-f84cee47e13bc12ff2ef81f5e007a839-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f84cee47e13bc12ff2ef81f5e007a839 columnFamilyName A 2024-12-10T15:38:43,202 DEBUG [StoreOpener-f84cee47e13bc12ff2ef81f5e007a839-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:43,202 INFO [StoreOpener-f84cee47e13bc12ff2ef81f5e007a839-1 {}] regionserver.HStore(327): Store=f84cee47e13bc12ff2ef81f5e007a839/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T15:38:43,202 INFO [StoreOpener-f84cee47e13bc12ff2ef81f5e007a839-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:43,203 INFO [StoreOpener-f84cee47e13bc12ff2ef81f5e007a839-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T15:38:43,203 INFO [StoreOpener-f84cee47e13bc12ff2ef81f5e007a839-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f84cee47e13bc12ff2ef81f5e007a839 columnFamilyName B 2024-12-10T15:38:43,203 DEBUG [StoreOpener-f84cee47e13bc12ff2ef81f5e007a839-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:43,203 INFO [StoreOpener-f84cee47e13bc12ff2ef81f5e007a839-1 {}] regionserver.HStore(327): Store=f84cee47e13bc12ff2ef81f5e007a839/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T15:38:43,204 INFO [StoreOpener-f84cee47e13bc12ff2ef81f5e007a839-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:43,204 INFO [StoreOpener-f84cee47e13bc12ff2ef81f5e007a839-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-10T15:38:43,204 INFO [StoreOpener-f84cee47e13bc12ff2ef81f5e007a839-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f84cee47e13bc12ff2ef81f5e007a839 columnFamilyName C 2024-12-10T15:38:43,204 DEBUG [StoreOpener-f84cee47e13bc12ff2ef81f5e007a839-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:43,205 INFO [StoreOpener-f84cee47e13bc12ff2ef81f5e007a839-1 {}] regionserver.HStore(327): Store=f84cee47e13bc12ff2ef81f5e007a839/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-10T15:38:43,205 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:43,205 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:43,206 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:43,207 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-10T15:38:43,208 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1085): writing seq id for f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:43,209 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1102): Opened f84cee47e13bc12ff2ef81f5e007a839; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70673321, jitterRate=0.053114548325538635}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-10T15:38:43,210 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1001): Region open journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:43,210 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., pid=154, masterSystemTime=1733845123196 2024-12-10T15:38:43,211 DEBUG [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:43,211 INFO [RS_OPEN_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
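The modify request logged at 15:38:42,313 rewrites the descriptor so that family A becomes a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4'); that is why ModifyTableProcedure pid=150 spawns a ReopenTableRegionsProcedure and the region is closed and reopened (pids 151-154) before the open at 15:38:43,199 completes above. A minimal sketch of making that change with the public HBase 2.x Admin API (an assumed, generic equivalent; the test's own helper code does not appear in this log) could be:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnFamilyA {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      TableDescriptor current = admin.getDescriptor(table);
      // Rebuild family 'A' with MOB enabled and a very low threshold, as in the logged
      // descriptor (IS_MOB => 'true', MOB_THRESHOLD => '4').
      ColumnFamilyDescriptor familyA = current.getColumnFamily(Bytes.toBytes("A"));
      ColumnFamilyDescriptor mobFamilyA = ColumnFamilyDescriptorBuilder.newBuilder(familyA)
          .setMobEnabled(true)
          .setMobThreshold(4L)
          .build();
      TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
          .modifyColumnFamily(mobFamilyA)
          .build();
      // The master handles this as a ModifyTableProcedure and reopens the region,
      // which is what pids 150-154 above correspond to.
      admin.modifyTable(modified);
    }
  }
}

The TableDescriptorChecker warning just before the modify notes that the effective memstore flush size is only 131072 bytes (128 KB); that small threshold is exactly what the warning flags as likely to cause very frequent flushing, and the resulting memstore pressure shows up once the writer threads start at 15:38:43.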
2024-12-10T15:38:43,212 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=f84cee47e13bc12ff2ef81f5e007a839, regionState=OPEN, openSeqNum=5, regionLocation=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:43,213 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=152 2024-12-10T15:38:43,213 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=152, state=SUCCESS; OpenRegionProcedure f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 in 168 msec 2024-12-10T15:38:43,214 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=152, resume processing ppid=151 2024-12-10T15:38:43,214 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, ppid=151, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=f84cee47e13bc12ff2ef81f5e007a839, REOPEN/MOVE in 479 msec 2024-12-10T15:38:43,216 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-12-10T15:38:43,216 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 482 msec 2024-12-10T15:38:43,217 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 903 msec 2024-12-10T15:38:43,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-10T15:38:43,218 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3f6a59e4 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5d836f78 2024-12-10T15:38:43,229 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d7fe93b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:38:43,230 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x150e08ed to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@53305d9b 2024-12-10T15:38:43,242 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11c440f7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:38:43,243 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3a3b66d3 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6bb6288a 2024-12-10T15:38:43,254 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58460ef3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:38:43,255 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 
0x5cfdf76c to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6556601 2024-12-10T15:38:43,267 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e8cd1ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:38:43,267 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x68c2838a to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@458a85fd 2024-12-10T15:38:43,284 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d832d43, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:38:43,284 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x410bf0c8 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@15b6349f 2024-12-10T15:38:43,300 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@503a7d2e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:38:43,301 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x67adb273 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@439b60d5 2024-12-10T15:38:43,313 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@404bb685, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:38:43,314 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x474dec36 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5f48b1c2 2024-12-10T15:38:43,325 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42aacb30, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:38:43,326 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x68dbad25 to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7287c75d 2024-12-10T15:38:43,339 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66e06176, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:38:43,339 DEBUG 
[Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6d2c412e to 127.0.0.1:56346 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@215ddef9 2024-12-10T15:38:43,350 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44d3f075, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-10T15:38:43,352 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:38:43,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees 2024-12-10T15:38:43,353 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=155, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:38:43,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-10T15:38:43,354 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=155, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:38:43,354 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:38:43,356 DEBUG [hconnection-0x2bc06849-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:38:43,357 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39492, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:38:43,360 DEBUG [hconnection-0x59c5929d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:38:43,361 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39504, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:38:43,363 DEBUG [hconnection-0x3abaf249-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:38:43,364 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39518, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:38:43,365 DEBUG [hconnection-0xa0b662-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:38:43,366 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39528, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:38:43,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
regionserver.HRegion(8581): Flush requested on f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:43,369 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f84cee47e13bc12ff2ef81f5e007a839 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T15:38:43,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=A 2024-12-10T15:38:43,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:43,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=B 2024-12-10T15:38:43,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:43,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=C 2024-12-10T15:38:43,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:43,370 DEBUG [hconnection-0x2e7e0cf1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:38:43,371 DEBUG [hconnection-0x23346561-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:38:43,371 DEBUG [hconnection-0x43e3b84e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:38:43,371 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39540, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:38:43,371 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39552, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:38:43,373 DEBUG [hconnection-0x36159bf6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:38:43,374 DEBUG [hconnection-0x5b0c0714-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:38:43,376 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39556, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:38:43,376 DEBUG [hconnection-0x4a3b7537-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-10T15:38:43,376 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39558, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:38:43,377 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39574, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:38:43,380 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39578, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-10T15:38:43,395 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:43,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845183392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:43,395 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:43,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845183393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:43,396 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:43,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39556 deadline: 1733845183394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:43,397 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:43,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39540 deadline: 1733845183395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:43,397 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T15:38:43,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845183395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049
2024-12-10T15:38:43,402 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210a4b91f19da294093bbb79ab401d8110b_f84cee47e13bc12ff2ef81f5e007a839 is 50, key is test_row_0/A:col10/1733845123369/Put/seqid=0
2024-12-10T15:38:43,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742424_1600 (size=12154)
2024-12-10T15:38:43,414 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:38:43,418 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210a4b91f19da294093bbb79ab401d8110b_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210a4b91f19da294093bbb79ab401d8110b_f84cee47e13bc12ff2ef81f5e007a839
2024-12-10T15:38:43,420 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/7badb19800024adfae93fa05b0c23d5f, store: [table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839]
2024-12-10T15:38:43,420 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/7badb19800024adfae93fa05b0c23d5f is 175, key is test_row_0/A:col10/1733845123369/Put/seqid=0
2024-12-10T15:38:43,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742425_1601 (size=30955)
2024-12-10T15:38:43,438 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/7badb19800024adfae93fa05b0c23d5f 2024-12-10T15:38:43,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-10T15:38:43,466 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/53d5323b2562406886107489bb3b13b6 is 50, key is test_row_0/B:col10/1733845123369/Put/seqid=0 2024-12-10T15:38:43,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742426_1602 (size=12001) 2024-12-10T15:38:43,481 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/53d5323b2562406886107489bb3b13b6 2024-12-10T15:38:43,497 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:43,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845183496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:43,497 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:43,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845183496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:43,499 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:43,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39540 deadline: 1733845183497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:43,499 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:43,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39556 deadline: 1733845183497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:43,499 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-10T15:38:43,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845183498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049
2024-12-10T15:38:43,504 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/d7b133ee4e604a9caf2ec8ecfa484810 is 50, key is test_row_0/C:col10/1733845123369/Put/seqid=0
2024-12-10T15:38:43,505 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049
2024-12-10T15:38:43,505 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156
2024-12-10T15:38:43,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.
2024-12-10T15:38:43,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing
2024-12-10T15:38:43,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.
2024-12-10T15:38:43,506 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] handler.RSProcedureHandler(58): pid=156
java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:43,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=156 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:43,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=156 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:43,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742427_1603 (size=12001) 2024-12-10T15:38:43,528 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/d7b133ee4e604a9caf2ec8ecfa484810 2024-12-10T15:38:43,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/7badb19800024adfae93fa05b0c23d5f as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/7badb19800024adfae93fa05b0c23d5f 2024-12-10T15:38:43,540 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/7badb19800024adfae93fa05b0c23d5f, entries=150, sequenceid=16, filesize=30.2 K 2024-12-10T15:38:43,541 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/53d5323b2562406886107489bb3b13b6 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/53d5323b2562406886107489bb3b13b6 2024-12-10T15:38:43,552 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/53d5323b2562406886107489bb3b13b6, entries=150, sequenceid=16, filesize=11.7 K
2024-12-10T15:38:43,553 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/d7b133ee4e604a9caf2ec8ecfa484810 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/d7b133ee4e604a9caf2ec8ecfa484810
2024-12-10T15:38:43,557 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/d7b133ee4e604a9caf2ec8ecfa484810, entries=150, sequenceid=16, filesize=11.7 K
2024-12-10T15:38:43,558 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for f84cee47e13bc12ff2ef81f5e007a839 in 189ms, sequenceid=16, compaction requested=false
2024-12-10T15:38:43,558 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f84cee47e13bc12ff2ef81f5e007a839:
2024-12-10T15:38:43,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155
2024-12-10T15:38:43,657 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049
2024-12-10T15:38:43,658 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156
2024-12-10T15:38:43,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.
2024-12-10T15:38:43,658 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2837): Flushing f84cee47e13bc12ff2ef81f5e007a839 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB
2024-12-10T15:38:43,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=A
2024-12-10T15:38:43,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T15:38:43,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=B
2024-12-10T15:38:43,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T15:38:43,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=C
2024-12-10T15:38:43,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-10T15:38:43,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121095457f0e04ee4a21b098fa3458f00e00_f84cee47e13bc12ff2ef81f5e007a839 is 50, key is test_row_0/A:col10/1733845123392/Put/seqid=0
2024-12-10T15:38:43,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742428_1604 (size=12154)
2024-12-10T15:38:43,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on f84cee47e13bc12ff2ef81f5e007a839
2024-12-10T15:38:43,700 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing
2024-12-10T15:38:43,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:43,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845183702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:43,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:43,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845183703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:43,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:43,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845183703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:43,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:43,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39556 deadline: 1733845183703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:43,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:43,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39540 deadline: 1733845183704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:43,807 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:43,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845183806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:43,807 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:43,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845183806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:43,807 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:43,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845183806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:43,807 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:43,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39540 deadline: 1733845183806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:43,808 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:43,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39556 deadline: 1733845183806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:43,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-10T15:38:44,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:44,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845184008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:44,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:44,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39556 deadline: 1733845184008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:44,010 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:44,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845184009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:44,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:44,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845184011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:44,012 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:44,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39540 deadline: 1733845184011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:44,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:44,075 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121095457f0e04ee4a21b098fa3458f00e00_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121095457f0e04ee4a21b098fa3458f00e00_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:44,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/654d6100dad64c10aac35dfcc59a98b3, store: [table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:44,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/654d6100dad64c10aac35dfcc59a98b3 is 175, key is test_row_0/A:col10/1733845123392/Put/seqid=0 2024-12-10T15:38:44,090 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742429_1605 (size=30955) 2024-12-10T15:38:44,090 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/654d6100dad64c10aac35dfcc59a98b3 2024-12-10T15:38:44,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/c14c1272a7d74b268841fa14e95424ea is 50, key is test_row_0/B:col10/1733845123392/Put/seqid=0 2024-12-10T15:38:44,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742430_1606 (size=12001) 2024-12-10T15:38:44,112 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/c14c1272a7d74b268841fa14e95424ea 2024-12-10T15:38:44,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/6c07267402a342c1aeebaca699c81814 is 50, key is test_row_0/C:col10/1733845123392/Put/seqid=0 2024-12-10T15:38:44,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742431_1607 (size=12001) 2024-12-10T15:38:44,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:44,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845184312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:44,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:44,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845184312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:44,315 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:44,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39556 deadline: 1733845184313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:44,315 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:44,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845184313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:44,316 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:44,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39540 deadline: 1733845184316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:44,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-10T15:38:44,540 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/6c07267402a342c1aeebaca699c81814 2024-12-10T15:38:44,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/654d6100dad64c10aac35dfcc59a98b3 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/654d6100dad64c10aac35dfcc59a98b3 2024-12-10T15:38:44,547 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/654d6100dad64c10aac35dfcc59a98b3, entries=150, sequenceid=41, filesize=30.2 K 2024-12-10T15:38:44,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/c14c1272a7d74b268841fa14e95424ea as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/c14c1272a7d74b268841fa14e95424ea 2024-12-10T15:38:44,551 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/c14c1272a7d74b268841fa14e95424ea, entries=150, sequenceid=41, filesize=11.7 K 2024-12-10T15:38:44,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/6c07267402a342c1aeebaca699c81814 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/6c07267402a342c1aeebaca699c81814 2024-12-10T15:38:44,555 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/6c07267402a342c1aeebaca699c81814, entries=150, sequenceid=41, filesize=11.7 K 2024-12-10T15:38:44,555 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for f84cee47e13bc12ff2ef81f5e007a839 in 897ms, sequenceid=41, compaction requested=false 2024-12-10T15:38:44,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2538): Flush status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:44,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:38:44,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-12-10T15:38:44,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=156 2024-12-10T15:38:44,584 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-12-10T15:38:44,584 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2130 sec 2024-12-10T15:38:44,587 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees in 1.2320 sec 2024-12-10T15:38:44,590 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-10T15:38:44,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:44,819 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f84cee47e13bc12ff2ef81f5e007a839 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-10T15:38:44,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=A 2024-12-10T15:38:44,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:44,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=B 2024-12-10T15:38:44,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:44,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=C 2024-12-10T15:38:44,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:44,829 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121020f81acb61794f5289f4c08eaea0418c_f84cee47e13bc12ff2ef81f5e007a839 is 50, key is test_row_0/A:col10/1733845124819/Put/seqid=0 2024-12-10T15:38:44,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742432_1608 (size=14594) 2024-12-10T15:38:44,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:44,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845184848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:44,853 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:44,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845184849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:44,853 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:44,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39540 deadline: 1733845184849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:44,856 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:44,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39556 deadline: 1733845184853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:44,856 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:44,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845184853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:44,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:44,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845184954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:44,957 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:44,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39540 deadline: 1733845184956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:44,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:44,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845184956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:44,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:44,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39556 deadline: 1733845184956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:44,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:44,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845184957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:45,159 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:45,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845185157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:45,159 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:45,162 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:45,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39556 deadline: 1733845185160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:45,162 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:45,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39540 deadline: 1733845185158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:45,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845185160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:45,165 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:45,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845185162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:45,250 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:45,254 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121020f81acb61794f5289f4c08eaea0418c_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121020f81acb61794f5289f4c08eaea0418c_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:45,255 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/cff3e4b0f06f49a089f4c929f49fd9a2, store: [table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:45,255 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/cff3e4b0f06f49a089f4c929f49fd9a2 is 175, key is test_row_0/A:col10/1733845124819/Put/seqid=0 2024-12-10T15:38:45,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742433_1609 (size=39549) 2024-12-10T15:38:45,462 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:45,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845185461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:45,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-10T15:38:45,464 INFO [Thread-2652 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 155 completed 2024-12-10T15:38:45,465 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:38:45,466 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:45,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39556 deadline: 1733845185464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:45,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees 2024-12-10T15:38:45,466 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:45,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845185464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:45,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-10T15:38:45,467 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:38:45,467 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:38:45,467 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:38:45,467 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:45,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39540 deadline: 1733845185465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:45,468 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:45,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845185466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:45,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-10T15:38:45,619 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:45,619 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-10T15:38:45,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:45,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:45,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:45,619 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:38:45,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:45,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:45,659 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/cff3e4b0f06f49a089f4c929f49fd9a2 2024-12-10T15:38:45,664 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/988862916eae4ab08128397d0a35a658 is 50, key is test_row_0/B:col10/1733845124819/Put/seqid=0 2024-12-10T15:38:45,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742434_1610 (size=12001) 2024-12-10T15:38:45,668 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/988862916eae4ab08128397d0a35a658 2024-12-10T15:38:45,673 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/e02de74377be4919ab63dce3a6e53a4e is 50, key is test_row_0/C:col10/1733845124819/Put/seqid=0 2024-12-10T15:38:45,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742435_1611 (size=12001) 2024-12-10T15:38:45,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-10T15:38:45,771 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:45,771 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-10T15:38:45,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:45,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:45,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:38:45,772 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:45,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:45,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:45,924 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:45,924 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-10T15:38:45,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:45,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:45,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:45,924 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:45,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:45,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:45,966 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:45,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845185964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:45,968 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:45,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39556 deadline: 1733845185967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:45,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:45,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845185969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:45,973 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:45,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845185972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:45,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:45,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39540 deadline: 1733845185972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:46,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-10T15:38:46,078 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:46,079 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-10T15:38:46,079 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/e02de74377be4919ab63dce3a6e53a4e 2024-12-10T15:38:46,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:46,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:46,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:38:46,079 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:46,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:46,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:46,087 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/cff3e4b0f06f49a089f4c929f49fd9a2 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/cff3e4b0f06f49a089f4c929f49fd9a2 2024-12-10T15:38:46,093 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/cff3e4b0f06f49a089f4c929f49fd9a2, entries=200, sequenceid=54, filesize=38.6 K 2024-12-10T15:38:46,099 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/988862916eae4ab08128397d0a35a658 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/988862916eae4ab08128397d0a35a658 2024-12-10T15:38:46,102 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/988862916eae4ab08128397d0a35a658, entries=150, sequenceid=54, filesize=11.7 K 2024-12-10T15:38:46,106 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/e02de74377be4919ab63dce3a6e53a4e as 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/e02de74377be4919ab63dce3a6e53a4e 2024-12-10T15:38:46,109 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/e02de74377be4919ab63dce3a6e53a4e, entries=150, sequenceid=54, filesize=11.7 K 2024-12-10T15:38:46,110 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for f84cee47e13bc12ff2ef81f5e007a839 in 1291ms, sequenceid=54, compaction requested=true 2024-12-10T15:38:46,110 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:46,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:38:46,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:46,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:38:46,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-10T15:38:46,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:38:46,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-10T15:38:46,111 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:46,114 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:46,114 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:46,114 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/A is initiating minor compaction (all files) 2024-12-10T15:38:46,114 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/A in TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:38:46,115 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/7badb19800024adfae93fa05b0c23d5f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/654d6100dad64c10aac35dfcc59a98b3, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/cff3e4b0f06f49a089f4c929f49fd9a2] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=99.1 K 2024-12-10T15:38:46,115 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:46,115 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. files: [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/7badb19800024adfae93fa05b0c23d5f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/654d6100dad64c10aac35dfcc59a98b3, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/cff3e4b0f06f49a089f4c929f49fd9a2] 2024-12-10T15:38:46,115 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 7badb19800024adfae93fa05b0c23d5f, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733845123367 2024-12-10T15:38:46,116 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:46,116 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/C is initiating minor compaction (all files) 2024-12-10T15:38:46,116 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/C in TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:38:46,116 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/d7b133ee4e604a9caf2ec8ecfa484810, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/6c07267402a342c1aeebaca699c81814, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/e02de74377be4919ab63dce3a6e53a4e] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=35.2 K 2024-12-10T15:38:46,116 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 654d6100dad64c10aac35dfcc59a98b3, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733845123390 2024-12-10T15:38:46,116 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting d7b133ee4e604a9caf2ec8ecfa484810, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733845123367 2024-12-10T15:38:46,117 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6c07267402a342c1aeebaca699c81814, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733845123390 2024-12-10T15:38:46,117 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting cff3e4b0f06f49a089f4c929f49fd9a2, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733845123701 2024-12-10T15:38:46,117 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting e02de74377be4919ab63dce3a6e53a4e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733845123701 2024-12-10T15:38:46,123 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:46,128 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210b2b6e21bc0244267a1377044e0be3114_f84cee47e13bc12ff2ef81f5e007a839 store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:46,129 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210b2b6e21bc0244267a1377044e0be3114_f84cee47e13bc12ff2ef81f5e007a839, store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:46,129 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210b2b6e21bc0244267a1377044e0be3114_f84cee47e13bc12ff2ef81f5e007a839 because there are no MOB cells, 
store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:46,132 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f84cee47e13bc12ff2ef81f5e007a839#C#compaction#525 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:46,133 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/c27ee34a0a084bfaa514c1262f48519d is 50, key is test_row_0/C:col10/1733845124819/Put/seqid=0 2024-12-10T15:38:46,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742436_1612 (size=4469) 2024-12-10T15:38:46,168 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f84cee47e13bc12ff2ef81f5e007a839#A#compaction#524 average throughput is 0.56 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:46,168 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/ef81c4c8c67e468f988e60b72224d1be is 175, key is test_row_0/A:col10/1733845124819/Put/seqid=0 2024-12-10T15:38:46,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742438_1614 (size=31058) 2024-12-10T15:38:46,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742437_1613 (size=12104) 2024-12-10T15:38:46,231 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/ef81c4c8c67e468f988e60b72224d1be as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/ef81c4c8c67e468f988e60b72224d1be 2024-12-10T15:38:46,237 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:46,237 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-10T15:38:46,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:38:46,237 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2837): Flushing f84cee47e13bc12ff2ef81f5e007a839 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-10T15:38:46,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=A 2024-12-10T15:38:46,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:46,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=B 2024-12-10T15:38:46,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:46,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=C 2024-12-10T15:38:46,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:46,256 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/A of f84cee47e13bc12ff2ef81f5e007a839 into ef81c4c8c67e468f988e60b72224d1be(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:38:46,257 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:46,257 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/A, priority=13, startTime=1733845126110; duration=0sec 2024-12-10T15:38:46,257 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:46,257 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:A 2024-12-10T15:38:46,257 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:46,260 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:46,261 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/B is initiating minor compaction (all files) 2024-12-10T15:38:46,261 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/B in TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:46,261 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/53d5323b2562406886107489bb3b13b6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/c14c1272a7d74b268841fa14e95424ea, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/988862916eae4ab08128397d0a35a658] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=35.2 K 2024-12-10T15:38:46,263 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 53d5323b2562406886107489bb3b13b6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733845123367 2024-12-10T15:38:46,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210aec1f45609ec4cc9846258854b97c164_f84cee47e13bc12ff2ef81f5e007a839 is 50, key is test_row_0/A:col10/1733845124849/Put/seqid=0 2024-12-10T15:38:46,267 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting c14c1272a7d74b268841fa14e95424ea, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, 
seqNum=41, earliestPutTs=1733845123390 2024-12-10T15:38:46,268 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 988862916eae4ab08128397d0a35a658, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733845123701 2024-12-10T15:38:46,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742439_1615 (size=12154) 2024-12-10T15:38:46,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:46,290 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f84cee47e13bc12ff2ef81f5e007a839#B#compaction#527 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:46,291 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/eb29c0d0de044acea9230e34315cda96 is 50, key is test_row_0/B:col10/1733845124819/Put/seqid=0 2024-12-10T15:38:46,294 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210aec1f45609ec4cc9846258854b97c164_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210aec1f45609ec4cc9846258854b97c164_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:46,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/7c21663bf95e43048b551c259743140d, store: [table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:46,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/7c21663bf95e43048b551c259743140d is 175, key is test_row_0/A:col10/1733845124849/Put/seqid=0 2024-12-10T15:38:46,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742440_1616 (size=12104) 2024-12-10T15:38:46,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742441_1617 (size=30955) 2024-12-10T15:38:46,353 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=47.0 K, 
hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/7c21663bf95e43048b551c259743140d 2024-12-10T15:38:46,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/39865f468c014c5cbef1def540a2eab9 is 50, key is test_row_0/B:col10/1733845124849/Put/seqid=0 2024-12-10T15:38:46,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742442_1618 (size=12001) 2024-12-10T15:38:46,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-10T15:38:46,600 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/c27ee34a0a084bfaa514c1262f48519d as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/c27ee34a0a084bfaa514c1262f48519d 2024-12-10T15:38:46,604 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/C of f84cee47e13bc12ff2ef81f5e007a839 into c27ee34a0a084bfaa514c1262f48519d(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:38:46,604 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:46,604 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/C, priority=13, startTime=1733845126110; duration=0sec 2024-12-10T15:38:46,604 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:46,604 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:C 2024-12-10T15:38:46,748 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/eb29c0d0de044acea9230e34315cda96 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/eb29c0d0de044acea9230e34315cda96 2024-12-10T15:38:46,754 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/B of f84cee47e13bc12ff2ef81f5e007a839 into eb29c0d0de044acea9230e34315cda96(size=11.8 K), total size for store is 11.8 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:38:46,754 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:46,754 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/B, priority=13, startTime=1733845126110; duration=0sec 2024-12-10T15:38:46,754 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:46,754 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:B 2024-12-10T15:38:46,804 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/39865f468c014c5cbef1def540a2eab9 2024-12-10T15:38:46,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/f6d8cf2c8fb54c648e6b3695d469f1f7 is 50, key is test_row_0/C:col10/1733845124849/Put/seqid=0 2024-12-10T15:38:46,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742443_1619 (size=12001) 2024-12-10T15:38:46,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:46,973 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:46,982 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:46,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39556 deadline: 1733845186980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:46,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:46,983 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:46,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845186980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:46,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845186981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:46,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:46,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845186981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:46,984 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:46,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39540 deadline: 1733845186982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:47,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:47,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845187084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:47,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:47,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39556 deadline: 1733845187084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:47,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:47,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845187084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:47,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:47,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845187084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:47,233 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/f6d8cf2c8fb54c648e6b3695d469f1f7 2024-12-10T15:38:47,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/7c21663bf95e43048b551c259743140d as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/7c21663bf95e43048b551c259743140d 2024-12-10T15:38:47,245 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/7c21663bf95e43048b551c259743140d, entries=150, sequenceid=78, filesize=30.2 K 2024-12-10T15:38:47,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/39865f468c014c5cbef1def540a2eab9 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/39865f468c014c5cbef1def540a2eab9 2024-12-10T15:38:47,249 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/39865f468c014c5cbef1def540a2eab9, entries=150, sequenceid=78, filesize=11.7 K 2024-12-10T15:38:47,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/f6d8cf2c8fb54c648e6b3695d469f1f7 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/f6d8cf2c8fb54c648e6b3695d469f1f7 2024-12-10T15:38:47,253 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/f6d8cf2c8fb54c648e6b3695d469f1f7, entries=150, sequenceid=78, filesize=11.7 K 2024-12-10T15:38:47,253 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for f84cee47e13bc12ff2ef81f5e007a839 in 1016ms, sequenceid=78, compaction requested=false 2024-12-10T15:38:47,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2538): Flush status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:47,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:47,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=158 2024-12-10T15:38:47,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=158 2024-12-10T15:38:47,276 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=157 2024-12-10T15:38:47,276 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8000 sec 2024-12-10T15:38:47,277 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees in 1.8120 sec 2024-12-10T15:38:47,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:47,292 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f84cee47e13bc12ff2ef81f5e007a839 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-10T15:38:47,293 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=A 2024-12-10T15:38:47,293 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:47,293 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=B 2024-12-10T15:38:47,293 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:47,293 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
f84cee47e13bc12ff2ef81f5e007a839, store=C 2024-12-10T15:38:47,293 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:47,299 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412107154ae7ca74b4380ae9cb8248d2b4f9c_f84cee47e13bc12ff2ef81f5e007a839 is 50, key is test_row_0/A:col10/1733845127292/Put/seqid=0 2024-12-10T15:38:47,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742444_1620 (size=14594) 2024-12-10T15:38:47,304 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:47,311 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412107154ae7ca74b4380ae9cb8248d2b4f9c_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412107154ae7ca74b4380ae9cb8248d2b4f9c_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:47,311 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/52f5bbb2d3d6432f963289f4dcef416a, store: [table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:47,312 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/52f5bbb2d3d6432f963289f4dcef416a is 175, key is test_row_0/A:col10/1733845127292/Put/seqid=0 2024-12-10T15:38:47,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742445_1621 (size=39549) 2024-12-10T15:38:47,348 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:47,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845187345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:47,349 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:47,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39556 deadline: 1733845187345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:47,349 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:47,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845187345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:47,349 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:47,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845187346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:47,451 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:47,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845187449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:47,451 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:47,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39556 deadline: 1733845187449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:47,451 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:47,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845187450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:47,452 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:47,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845187451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:47,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-10T15:38:47,570 INFO [Thread-2652 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 157 completed 2024-12-10T15:38:47,571 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:38:47,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees 2024-12-10T15:38:47,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-10T15:38:47,572 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:38:47,572 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:38:47,572 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:38:47,653 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:47,653 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:47,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845187653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:47,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39556 deadline: 1733845187653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:47,653 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:47,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845187653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:47,654 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:47,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845187653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:47,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-10T15:38:47,717 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=98, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/52f5bbb2d3d6432f963289f4dcef416a 2024-12-10T15:38:47,723 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:47,723 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-10T15:38:47,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:47,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:47,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:47,724 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:47,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:47,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:47,730 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/8f960cdbaafb4bae809d02cfc1f30d82 is 50, key is test_row_0/B:col10/1733845127292/Put/seqid=0 2024-12-10T15:38:47,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742446_1622 (size=12001) 2024-12-10T15:38:47,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-10T15:38:47,881 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:47,883 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-10T15:38:47,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:47,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:47,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:47,883 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:47,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:47,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:47,957 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:47,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39556 deadline: 1733845187955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:47,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:47,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845187955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:47,961 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:47,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845187959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:47,962 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:47,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845187959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:48,035 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:48,035 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-10T15:38:48,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:48,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:48,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:48,036 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:48,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:48,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:48,157 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=98 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/8f960cdbaafb4bae809d02cfc1f30d82 2024-12-10T15:38:48,171 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/362f02823428483088891f34749c86f0 is 50, key is test_row_0/C:col10/1733845127292/Put/seqid=0 2024-12-10T15:38:48,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-10T15:38:48,187 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:48,189 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-10T15:38:48,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:48,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:48,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:48,189 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:48,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:48,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:48,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742447_1623 (size=12001) 2024-12-10T15:38:48,343 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:48,344 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-10T15:38:48,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:48,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:48,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:48,344 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:38:48,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:48,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:48,463 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:48,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845188462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:48,466 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:48,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845188463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:48,471 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:48,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39556 deadline: 1733845188469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:48,472 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:48,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845188470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:48,496 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:48,497 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-10T15:38:48,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:48,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:48,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:48,497 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:48,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:48,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:48,619 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=98 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/362f02823428483088891f34749c86f0 2024-12-10T15:38:48,651 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:48,651 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/52f5bbb2d3d6432f963289f4dcef416a as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/52f5bbb2d3d6432f963289f4dcef416a 2024-12-10T15:38:48,651 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-10T15:38:48,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:48,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:48,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:48,652 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:48,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:48,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:48,673 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/52f5bbb2d3d6432f963289f4dcef416a, entries=200, sequenceid=98, filesize=38.6 K 2024-12-10T15:38:48,674 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/8f960cdbaafb4bae809d02cfc1f30d82 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/8f960cdbaafb4bae809d02cfc1f30d82 2024-12-10T15:38:48,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-10T15:38:48,677 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/8f960cdbaafb4bae809d02cfc1f30d82, entries=150, sequenceid=98, filesize=11.7 K 2024-12-10T15:38:48,678 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/362f02823428483088891f34749c86f0 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/362f02823428483088891f34749c86f0 2024-12-10T15:38:48,681 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/362f02823428483088891f34749c86f0, entries=150, sequenceid=98, filesize=11.7 K 2024-12-10T15:38:48,683 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for f84cee47e13bc12ff2ef81f5e007a839 in 1391ms, sequenceid=98, compaction requested=true 2024-12-10T15:38:48,683 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:48,684 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:48,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:A, 
priority=-2147483648, current under compaction store size is 1 2024-12-10T15:38:48,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:48,684 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:48,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:38:48,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:48,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:38:48,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:48,688 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101562 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:48,688 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/A is initiating minor compaction (all files) 2024-12-10T15:38:48,688 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/A in TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:48,688 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/ef81c4c8c67e468f988e60b72224d1be, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/7c21663bf95e43048b551c259743140d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/52f5bbb2d3d6432f963289f4dcef416a] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=99.2 K 2024-12-10T15:38:48,688 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:48,688 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
files: [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/ef81c4c8c67e468f988e60b72224d1be, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/7c21663bf95e43048b551c259743140d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/52f5bbb2d3d6432f963289f4dcef416a] 2024-12-10T15:38:48,688 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:48,688 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/B is initiating minor compaction (all files) 2024-12-10T15:38:48,688 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/B in TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:48,688 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/eb29c0d0de044acea9230e34315cda96, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/39865f468c014c5cbef1def540a2eab9, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/8f960cdbaafb4bae809d02cfc1f30d82] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=35.3 K 2024-12-10T15:38:48,689 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting ef81c4c8c67e468f988e60b72224d1be, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733845123701 2024-12-10T15:38:48,689 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting eb29c0d0de044acea9230e34315cda96, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733845123701 2024-12-10T15:38:48,689 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c21663bf95e43048b551c259743140d, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733845124848 2024-12-10T15:38:48,691 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 39865f468c014c5cbef1def540a2eab9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733845124848 2024-12-10T15:38:48,691 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52f5bbb2d3d6432f963289f4dcef416a, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1733845126980 2024-12-10T15:38:48,695 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 
8f960cdbaafb4bae809d02cfc1f30d82, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1733845126980 2024-12-10T15:38:48,714 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f84cee47e13bc12ff2ef81f5e007a839#B#compaction#533 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:48,715 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/74e9ac07f15c4479b8629d350fc9b5dd is 50, key is test_row_0/B:col10/1733845127292/Put/seqid=0 2024-12-10T15:38:48,720 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:48,722 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121093a348813a894f2e855057e40d7e3c2d_f84cee47e13bc12ff2ef81f5e007a839 store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:48,723 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121093a348813a894f2e855057e40d7e3c2d_f84cee47e13bc12ff2ef81f5e007a839, store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:48,723 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121093a348813a894f2e855057e40d7e3c2d_f84cee47e13bc12ff2ef81f5e007a839 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:48,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742448_1624 (size=12207) 2024-12-10T15:38:48,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742449_1625 (size=4469) 2024-12-10T15:38:48,772 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/74e9ac07f15c4479b8629d350fc9b5dd as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/74e9ac07f15c4479b8629d350fc9b5dd 2024-12-10T15:38:48,779 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/B of f84cee47e13bc12ff2ef81f5e007a839 into 74e9ac07f15c4479b8629d350fc9b5dd(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
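The two selections above ("Exploring compaction algorithm has selected 3 files ... with 1 in ratio") follow the usual size-ratio rule: a candidate window is accepted only if every file in it is no larger than ratio times the combined size of the other files in the window. The sketch below is a deliberately simplified, self-contained illustration of that idea, not HBase's actual ExploringCompactionPolicy; the class name, the 1.2 ratio, and the rounded file sizes are assumptions chosen to mirror the B-store files in this log.

```java
import java.util.ArrayList;
import java.util.List;

/**
 * Simplified illustration of ratio-based minor-compaction selection,
 * loosely modelled on the "Exploring compaction algorithm" log lines
 * above (3 eligible files, all "in ratio"). Not the real HBase policy.
 */
public class CompactionSelectionSketch {

  /** A window is "in ratio" if no file exceeds ratio * (sum of the other files). */
  static boolean allInRatio(List<Long> sizes, double ratio) {
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    for (long size : sizes) {
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  /** Return the first contiguous window of at least minFiles candidates that is in ratio. */
  static List<Long> select(List<Long> candidates, int minFiles, double ratio) {
    for (int start = 0; start + minFiles <= candidates.size(); start++) {
      for (int end = candidates.size(); end - start >= minFiles; end--) {
        List<Long> window = candidates.subList(start, end);
        if (allInRatio(window, ratio)) {
          return new ArrayList<>(window);
        }
      }
    }
    return List.of();
  }

  public static void main(String[] args) {
    // Sizes roughly matching the three B-store files above (~11.8 K + 11.7 K + 11.7 K = 36106 bytes).
    List<Long> storeFiles = List.of(12_083L, 12_012L, 12_011L);
    System.out.println("Selected for compaction: " + select(storeFiles, 3, 1.2));
  }
}
```

With three near-equal files every file is well inside the ratio, so the whole window is picked, which matches the "3 files of size 36106 ... 1 in ratio" selection reported for store B.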
2024-12-10T15:38:48,779 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:48,779 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/B, priority=13, startTime=1733845128684; duration=0sec 2024-12-10T15:38:48,779 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:48,779 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:B 2024-12-10T15:38:48,779 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:48,780 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:48,780 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/C is initiating minor compaction (all files) 2024-12-10T15:38:48,780 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/C in TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:48,780 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/c27ee34a0a084bfaa514c1262f48519d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/f6d8cf2c8fb54c648e6b3695d469f1f7, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/362f02823428483088891f34749c86f0] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=35.3 K 2024-12-10T15:38:48,780 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting c27ee34a0a084bfaa514c1262f48519d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733845123701 2024-12-10T15:38:48,780 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting f6d8cf2c8fb54c648e6b3695d469f1f7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733845124848 2024-12-10T15:38:48,780 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 362f02823428483088891f34749c86f0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1733845126980 2024-12-10T15:38:48,786 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
f84cee47e13bc12ff2ef81f5e007a839#C#compaction#535 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:48,786 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/7fd382259201484f9a89e568e5184233 is 50, key is test_row_0/C:col10/1733845127292/Put/seqid=0 2024-12-10T15:38:48,804 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:48,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742450_1626 (size=12207) 2024-12-10T15:38:48,804 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-10T15:38:48,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:48,804 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2837): Flushing f84cee47e13bc12ff2ef81f5e007a839 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-10T15:38:48,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=A 2024-12-10T15:38:48,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:48,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=B 2024-12-10T15:38:48,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:48,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=C 2024-12-10T15:38:48,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:48,811 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/7fd382259201484f9a89e568e5184233 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/7fd382259201484f9a89e568e5184233 2024-12-10T15:38:48,814 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 
(all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/C of f84cee47e13bc12ff2ef81f5e007a839 into 7fd382259201484f9a89e568e5184233(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:38:48,814 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:48,814 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/C, priority=13, startTime=1733845128684; duration=0sec 2024-12-10T15:38:48,814 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:48,814 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:C 2024-12-10T15:38:48,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121080242a90f001489a9b17283ff69beef1_f84cee47e13bc12ff2ef81f5e007a839 is 50, key is test_row_0/A:col10/1733845127345/Put/seqid=0 2024-12-10T15:38:48,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742451_1627 (size=12154) 2024-12-10T15:38:48,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:48,847 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121080242a90f001489a9b17283ff69beef1_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121080242a90f001489a9b17283ff69beef1_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:48,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/855874f31f35486184acc51575fd8e2f, store: [table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:48,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/855874f31f35486184acc51575fd8e2f is 175, key is test_row_0/A:col10/1733845127345/Put/seqid=0 
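The pid=159/pid=160 activity around this point (FlushTableProcedure fanning out a FlushRegionProcedure, the region server running FlushRegionCallable, and the master periodically "Checking to see if procedure is done") is what a client-side table flush looks like on the server. A minimal sketch of requesting such a flush through the public client API is shown below, assuming default connection configuration; the table name is taken from the log, everything else is illustrative only.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

/**
 * Minimal sketch: ask the master to flush a table. On the master this is
 * served by a flush procedure that dispatches per-region flush callables,
 * as seen for pid=159/pid=160 in the log above.
 */
public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // The caller waits while the master polls the procedure for completion
      // (the repeated "Checking to see if procedure is done pid=159" entries).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```

The earlier "Unable to complete flush ... as already flushing" failures for pid=160 are the region server rejecting the procedure while the MemStoreFlusher's own flush is still in progress; the master simply re-dispatches the callable until it succeeds, which is why the same stack trace repeats with different timestamps.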
2024-12-10T15:38:48,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742452_1628 (size=30955) 2024-12-10T15:38:48,861 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=118, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/855874f31f35486184acc51575fd8e2f 2024-12-10T15:38:48,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/9cc2e9e6d5c149d197ca0f1f1019f525 is 50, key is test_row_0/B:col10/1733845127345/Put/seqid=0 2024-12-10T15:38:48,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742453_1629 (size=12001) 2024-12-10T15:38:48,884 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/9cc2e9e6d5c149d197ca0f1f1019f525 2024-12-10T15:38:48,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/cc1e8a5d1a424cc59adfc89ff897072d is 50, key is test_row_0/C:col10/1733845127345/Put/seqid=0 2024-12-10T15:38:48,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742454_1630 (size=12001) 2024-12-10T15:38:48,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:48,991 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:49,018 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:49,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39540 deadline: 1733845189016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:49,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:49,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39540 deadline: 1733845189119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:49,169 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f84cee47e13bc12ff2ef81f5e007a839#A#compaction#534 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:49,169 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/08e06894b71444df982a8eae72d890cb is 175, key is test_row_0/A:col10/1733845127292/Put/seqid=0 2024-12-10T15:38:49,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742455_1631 (size=31161) 2024-12-10T15:38:49,309 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/cc1e8a5d1a424cc59adfc89ff897072d 2024-12-10T15:38:49,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/855874f31f35486184acc51575fd8e2f as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/855874f31f35486184acc51575fd8e2f 2024-12-10T15:38:49,326 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/855874f31f35486184acc51575fd8e2f, entries=150, sequenceid=118, filesize=30.2 K 2024-12-10T15:38:49,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/9cc2e9e6d5c149d197ca0f1f1019f525 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/9cc2e9e6d5c149d197ca0f1f1019f525 2024-12-10T15:38:49,329 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:49,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39540 deadline: 1733845189327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:49,329 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/9cc2e9e6d5c149d197ca0f1f1019f525, entries=150, sequenceid=118, filesize=11.7 K 2024-12-10T15:38:49,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/cc1e8a5d1a424cc59adfc89ff897072d as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/cc1e8a5d1a424cc59adfc89ff897072d 2024-12-10T15:38:49,333 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/cc1e8a5d1a424cc59adfc89ff897072d, entries=150, sequenceid=118, filesize=11.7 K 2024-12-10T15:38:49,334 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for f84cee47e13bc12ff2ef81f5e007a839 in 530ms, sequenceid=118, compaction requested=false 2024-12-10T15:38:49,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2538): Flush status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:49,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
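The recurring WARN/DEBUG pairs ("Region is too busy due to exceeding memstore size limit" followed by a CallRunner entry) show writes being rejected with RegionTooBusyException while the memstore is over its blocking limit (512 K in this test configuration) and flushes catch up. The standard HBase client already retries these calls internally; the sketch below only makes an application-level backoff explicit for illustration. Row, family, and qualifier names come from the log; the retry count and sleep times are arbitrary.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Illustrative writer that backs off when a put is rejected because the
 * region is over its memstore limit, as in the WARN entries above.
 * Retry parameters are arbitrary example values.
 */
public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (RegionTooBusyException e) {
          // Memstore above its blocking limit; give the flusher time to drain.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}
```

In the test run itself no such handling is needed: the writers simply see their mutations deferred until the flush at sequenceid=118 completes and the memstore drops back under the limit.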
2024-12-10T15:38:49,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=160 2024-12-10T15:38:49,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=160 2024-12-10T15:38:49,336 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-12-10T15:38:49,336 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7630 sec 2024-12-10T15:38:49,338 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees in 1.7660 sec 2024-12-10T15:38:49,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:49,479 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f84cee47e13bc12ff2ef81f5e007a839 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-10T15:38:49,483 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=A 2024-12-10T15:38:49,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:49,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=B 2024-12-10T15:38:49,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:49,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=C 2024-12-10T15:38:49,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:49,497 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210ce5b56b8979d4789a2972d1bf0c3fd2f_f84cee47e13bc12ff2ef81f5e007a839 is 50, key is test_row_0/A:col10/1733845129472/Put/seqid=0 2024-12-10T15:38:49,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:49,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845189505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:49,510 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:49,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845189507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:49,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:49,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39556 deadline: 1733845189509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:49,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:49,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845189509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:49,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742456_1632 (size=12304) 2024-12-10T15:38:49,590 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/08e06894b71444df982a8eae72d890cb as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/08e06894b71444df982a8eae72d890cb 2024-12-10T15:38:49,593 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/A of f84cee47e13bc12ff2ef81f5e007a839 into 08e06894b71444df982a8eae72d890cb(size=30.4 K), total size for store is 60.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:38:49,593 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:49,593 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/A, priority=13, startTime=1733845128683; duration=0sec 2024-12-10T15:38:49,593 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:49,594 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:A 2024-12-10T15:38:49,612 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:49,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845189610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:49,612 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:49,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845189611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:49,615 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:49,615 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:49,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845189613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:49,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39556 deadline: 1733845189613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:49,632 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:49,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39540 deadline: 1733845189631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:49,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-10T15:38:49,677 INFO [Thread-2652 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 159 completed 2024-12-10T15:38:49,678 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:38:49,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees 2024-12-10T15:38:49,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-10T15:38:49,679 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:38:49,679 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:38:49,679 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:38:49,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-10T15:38:49,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:49,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845189813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:49,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:49,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845189814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:49,817 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:49,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845189816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:49,818 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:49,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39556 deadline: 1733845189817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:49,831 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:49,831 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-10T15:38:49,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:49,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:49,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:49,831 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:38:49,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:49,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:49,918 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:49,921 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210ce5b56b8979d4789a2972d1bf0c3fd2f_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210ce5b56b8979d4789a2972d1bf0c3fd2f_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:49,922 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/1bdb2fcc2e674ae69d2424cccca3068d, store: [table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:49,922 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/1bdb2fcc2e674ae69d2424cccca3068d is 175, key is test_row_0/A:col10/1733845129472/Put/seqid=0 2024-12-10T15:38:49,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742457_1633 (size=31105) 2024-12-10T15:38:49,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-10T15:38:49,982 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:49,983 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-10T15:38:49,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:49,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:49,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:38:49,983 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:49,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:49,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:50,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:50,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845190115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:50,120 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:50,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39556 deadline: 1733845190118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:50,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:50,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845190119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:50,121 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:50,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845190120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:50,135 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:50,135 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:50,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39540 deadline: 1733845190133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:50,136 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-10T15:38:50,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:38:50,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:50,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:50,136 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:50,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:38:50,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:50,246 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-10T15:38:50,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-10T15:38:50,288 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:50,288 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-10T15:38:50,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:50,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:50,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:50,288 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:50,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:50,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:38:50,345 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=138, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/1bdb2fcc2e674ae69d2424cccca3068d 2024-12-10T15:38:50,357 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/6999056375af498ca477bda998c29996 is 50, key is test_row_0/B:col10/1733845129472/Put/seqid=0 2024-12-10T15:38:50,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742458_1634 (size=12151) 2024-12-10T15:38:50,378 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=138 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/6999056375af498ca477bda998c29996 2024-12-10T15:38:50,388 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/0de6c751d804422d8035ebb7869c7520 is 50, key is test_row_0/C:col10/1733845129472/Put/seqid=0 2024-12-10T15:38:50,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742459_1635 (size=12151) 2024-12-10T15:38:50,417 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=138 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/0de6c751d804422d8035ebb7869c7520 2024-12-10T15:38:50,421 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/1bdb2fcc2e674ae69d2424cccca3068d as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/1bdb2fcc2e674ae69d2424cccca3068d 2024-12-10T15:38:50,424 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/1bdb2fcc2e674ae69d2424cccca3068d, entries=150, sequenceid=138, filesize=30.4 K 2024-12-10T15:38:50,425 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/6999056375af498ca477bda998c29996 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/6999056375af498ca477bda998c29996 2024-12-10T15:38:50,428 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/6999056375af498ca477bda998c29996, entries=150, sequenceid=138, filesize=11.9 K 2024-12-10T15:38:50,429 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/0de6c751d804422d8035ebb7869c7520 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/0de6c751d804422d8035ebb7869c7520 2024-12-10T15:38:50,432 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/0de6c751d804422d8035ebb7869c7520, entries=150, sequenceid=138, filesize=11.9 K 2024-12-10T15:38:50,433 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for f84cee47e13bc12ff2ef81f5e007a839 in 954ms, sequenceid=138, compaction requested=true 2024-12-10T15:38:50,433 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:50,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:38:50,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:50,433 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:50,433 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:50,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:38:50,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:50,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:38:50,433 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:50,434 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93221 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:50,434 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/A is initiating minor 
compaction (all files) 2024-12-10T15:38:50,434 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/A in TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:50,434 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/08e06894b71444df982a8eae72d890cb, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/855874f31f35486184acc51575fd8e2f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/1bdb2fcc2e674ae69d2424cccca3068d] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=91.0 K 2024-12-10T15:38:50,435 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:50,435 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. files: [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/08e06894b71444df982a8eae72d890cb, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/855874f31f35486184acc51575fd8e2f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/1bdb2fcc2e674ae69d2424cccca3068d] 2024-12-10T15:38:50,435 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:50,435 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/B is initiating minor compaction (all files) 2024-12-10T15:38:50,435 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/B in TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
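Editor's note: the flush above ends with "compaction requested=true", and the ExploringCompactionPolicy lines show the selection that follows (3 store files eligible, 16 blocking), which matches the usual store-file thresholds. As a rough, illustrative sketch only, the Java snippet below shows the server-side configuration keys that govern this behaviour; the values are hypothetical defaults for illustration and are not taken from this test run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
        public static void main(String[] args) {
            // Hypothetical values; the log above is consistent with the usual
            // defaults (3 files trigger a minor compaction, 16 files block flushes).
            Configuration conf = HBaseConfiguration.create();
            conf.setInt("hbase.hstore.compaction.min", 3);      // min store files before selection
            conf.setInt("hbase.hstore.compaction.max", 10);     // max files compacted at once
            conf.setInt("hbase.hstore.blockingStoreFiles", 16); // flushes block above this count
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024); // per-region flush trigger
            System.out.println("blockingStoreFiles = " + conf.getInt("hbase.hstore.blockingStoreFiles", -1));
        }
    }

Raising hbase.hstore.compaction.min would let more flushed files accumulate before a selection like the one above is queued; the log resumes below.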
2024-12-10T15:38:50,435 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/74e9ac07f15c4479b8629d350fc9b5dd, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/9cc2e9e6d5c149d197ca0f1f1019f525, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/6999056375af498ca477bda998c29996] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=35.5 K 2024-12-10T15:38:50,435 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 08e06894b71444df982a8eae72d890cb, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1733845126980 2024-12-10T15:38:50,435 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 74e9ac07f15c4479b8629d350fc9b5dd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1733845126980 2024-12-10T15:38:50,435 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 855874f31f35486184acc51575fd8e2f, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733845127303 2024-12-10T15:38:50,436 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 9cc2e9e6d5c149d197ca0f1f1019f525, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733845127303 2024-12-10T15:38:50,436 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1bdb2fcc2e674ae69d2424cccca3068d, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1733845129016 2024-12-10T15:38:50,436 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 6999056375af498ca477bda998c29996, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1733845129016 2024-12-10T15:38:50,440 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:50,440 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-10T15:38:50,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
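Editor's note: the FlushRegionCallable being executed here (pid=162) is dispatched by the master as a subprocedure of a FlushTableProcedure (ppid=161, completed later in this log). For orientation, the same path is normally triggered from a client through the Admin API; the sketch below is illustrative only, the table name is taken from the log, and the connection setup is generic.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Asks the master to flush every region of the table; on the region
                // server this surfaces as the RS_FLUSH_REGIONS events logged above.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }

The log continues below with the per-store flush and compaction details for this request.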
2024-12-10T15:38:50,441 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2837): Flushing f84cee47e13bc12ff2ef81f5e007a839 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-10T15:38:50,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=A 2024-12-10T15:38:50,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:50,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=B 2024-12-10T15:38:50,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:50,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=C 2024-12-10T15:38:50,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:50,445 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f84cee47e13bc12ff2ef81f5e007a839#B#compaction#542 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:50,445 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/8ed75e686dab4566aa8e2945681c64b4 is 50, key is test_row_0/B:col10/1733845129472/Put/seqid=0 2024-12-10T15:38:50,446 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:50,468 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210aa6af216dff74b47876fd9d42b34ec1a_f84cee47e13bc12ff2ef81f5e007a839 store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:50,469 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210aa6af216dff74b47876fd9d42b34ec1a_f84cee47e13bc12ff2ef81f5e007a839, store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:50,469 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210aa6af216dff74b47876fd9d42b34ec1a_f84cee47e13bc12ff2ef81f5e007a839 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:50,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210804eb66ba28843598c6feaa48ac48718_f84cee47e13bc12ff2ef81f5e007a839 is 50, key is test_row_0/A:col10/1733845129492/Put/seqid=0 2024-12-10T15:38:50,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742460_1636 (size=12459) 2024-12-10T15:38:50,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742461_1637 (size=4469) 2024-12-10T15:38:50,530 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f84cee47e13bc12ff2ef81f5e007a839#A#compaction#543 average throughput is 0.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:50,531 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/b016ec6dc741418ab42e9c52411e19e8 is 175, key is test_row_0/A:col10/1733845129472/Put/seqid=0 2024-12-10T15:38:50,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742462_1638 (size=12304) 2024-12-10T15:38:50,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:50,549 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210804eb66ba28843598c6feaa48ac48718_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210804eb66ba28843598c6feaa48ac48718_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:50,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/51169a5260ff458c87685887e99624d0, store: [table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:50,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/51169a5260ff458c87685887e99624d0 is 175, key is test_row_0/A:col10/1733845129492/Put/seqid=0 2024-12-10T15:38:50,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742463_1639 (size=31413) 2024-12-10T15:38:50,583 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/b016ec6dc741418ab42e9c52411e19e8 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/b016ec6dc741418ab42e9c52411e19e8 2024-12-10T15:38:50,588 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/A of f84cee47e13bc12ff2ef81f5e007a839 into b016ec6dc741418ab42e9c52411e19e8(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
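Editor's note: column family A of this table is MOB-enabled, which is why DefaultMobStoreFlusher and DefaultMobStoreCompactor appear above and why the MOB writer is created and then aborted when no cell exceeds the MOB threshold ("Aborting writer ... because there are no MOB cells"). A minimal, hypothetical sketch of declaring such a family is shown below; the 100 KB threshold is an assumption for illustration, not the value used by this test.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilySketch {
        // Builds a descriptor for a MOB-enabled family "A": cells larger than the
        // threshold are written to separate MOB files instead of regular HFiles,
        // while smaller cells (as in this test) stay in the ordinary store files.
        static TableDescriptorBuilder mobTable() {
            ColumnFamilyDescriptor cfA = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)
                .setMobThreshold(100 * 1024) // hypothetical 100 KB threshold
                .build();
            return TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                .setColumnFamily(cfA);
        }
    }

The log resumes below with the C-store compaction and the first back-pressure warnings.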
2024-12-10T15:38:50,588 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:50,588 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/A, priority=13, startTime=1733845130433; duration=0sec 2024-12-10T15:38:50,588 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:50,588 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:A 2024-12-10T15:38:50,588 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:50,589 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:50,589 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/C is initiating minor compaction (all files) 2024-12-10T15:38:50,589 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/C in TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:50,589 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/7fd382259201484f9a89e568e5184233, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/cc1e8a5d1a424cc59adfc89ff897072d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/0de6c751d804422d8035ebb7869c7520] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=35.5 K 2024-12-10T15:38:50,589 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7fd382259201484f9a89e568e5184233, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=98, earliestPutTs=1733845126980 2024-12-10T15:38:50,590 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting cc1e8a5d1a424cc59adfc89ff897072d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733845127303 2024-12-10T15:38:50,590 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0de6c751d804422d8035ebb7869c7520, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1733845129016 2024-12-10T15:38:50,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46053 is added to blk_1073742464_1640 (size=31105) 2024-12-10T15:38:50,600 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f84cee47e13bc12ff2ef81f5e007a839#C#compaction#545 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:50,601 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/1cb6e26af32c4f7cb6df852a6f0494ac is 50, key is test_row_0/C:col10/1733845129472/Put/seqid=0 2024-12-10T15:38:50,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:50,622 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:50,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742465_1641 (size=12459) 2024-12-10T15:38:50,649 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:50,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845190646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:50,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:50,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845190646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:50,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:50,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39556 deadline: 1733845190646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:50,654 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:50,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845190652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:50,751 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:50,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845190750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:50,753 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:50,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845190752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:50,756 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:50,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845190755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:50,757 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:50,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39556 deadline: 1733845190755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:50,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-10T15:38:50,893 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/8ed75e686dab4566aa8e2945681c64b4 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/8ed75e686dab4566aa8e2945681c64b4 2024-12-10T15:38:50,899 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/B of f84cee47e13bc12ff2ef81f5e007a839 into 8ed75e686dab4566aa8e2945681c64b4(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:38:50,899 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:50,899 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/B, priority=13, startTime=1733845130433; duration=0sec 2024-12-10T15:38:50,899 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:50,899 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:B 2024-12-10T15:38:50,956 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:50,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845190955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:50,956 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:50,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845190956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:50,959 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:50,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845190959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:50,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:50,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39556 deadline: 1733845190962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:50,993 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=157, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/51169a5260ff458c87685887e99624d0 2024-12-10T15:38:51,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/3adc69728e5b4cf3a81c995ba00efa09 is 50, key is test_row_0/B:col10/1733845129492/Put/seqid=0 2024-12-10T15:38:51,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742466_1642 (size=12151) 2024-12-10T15:38:51,020 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/3adc69728e5b4cf3a81c995ba00efa09 2024-12-10T15:38:51,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/e87df0ff98034ac39836791624754c89 is 50, key is test_row_0/C:col10/1733845129492/Put/seqid=0 2024-12-10T15:38:51,039 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/1cb6e26af32c4f7cb6df852a6f0494ac as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/1cb6e26af32c4f7cb6df852a6f0494ac 2024-12-10T15:38:51,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742467_1643 (size=12151) 2024-12-10T15:38:51,047 INFO 
[RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/C of f84cee47e13bc12ff2ef81f5e007a839 into 1cb6e26af32c4f7cb6df852a6f0494ac(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:38:51,047 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:51,047 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/C, priority=13, startTime=1733845130433; duration=0sec 2024-12-10T15:38:51,047 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:51,047 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:C 2024-12-10T15:38:51,139 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:51,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39540 deadline: 1733845191138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:51,258 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:51,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845191257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:51,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:51,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845191258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:51,260 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:51,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845191260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:51,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:51,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39556 deadline: 1733845191271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:51,447 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/e87df0ff98034ac39836791624754c89 2024-12-10T15:38:51,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/51169a5260ff458c87685887e99624d0 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/51169a5260ff458c87685887e99624d0 2024-12-10T15:38:51,464 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/51169a5260ff458c87685887e99624d0, entries=150, sequenceid=157, filesize=30.4 K 2024-12-10T15:38:51,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/3adc69728e5b4cf3a81c995ba00efa09 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/3adc69728e5b4cf3a81c995ba00efa09 2024-12-10T15:38:51,481 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/3adc69728e5b4cf3a81c995ba00efa09, entries=150, sequenceid=157, filesize=11.9 K 2024-12-10T15:38:51,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/e87df0ff98034ac39836791624754c89 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/e87df0ff98034ac39836791624754c89 2024-12-10T15:38:51,487 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/e87df0ff98034ac39836791624754c89, entries=150, sequenceid=157, filesize=11.9 K 2024-12-10T15:38:51,495 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for f84cee47e13bc12ff2ef81f5e007a839 in 1054ms, sequenceid=157, compaction requested=false 2024-12-10T15:38:51,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2538): Flush status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:51,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:38:51,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=162 2024-12-10T15:38:51,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=162 2024-12-10T15:38:51,504 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-12-10T15:38:51,504 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8200 sec 2024-12-10T15:38:51,505 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees in 1.8270 sec 2024-12-10T15:38:51,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:51,761 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f84cee47e13bc12ff2ef81f5e007a839 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-10T15:38:51,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=A 2024-12-10T15:38:51,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:51,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=B 2024-12-10T15:38:51,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:51,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=C 2024-12-10T15:38:51,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:51,766 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412105a97a754dcfc4c899a4ad331fdef6cbe_f84cee47e13bc12ff2ef81f5e007a839 is 50, key is test_row_0/A:col10/1733845130638/Put/seqid=0 2024-12-10T15:38:51,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742468_1644 (size=14794) 2024-12-10T15:38:51,772 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:51,776 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:51,776 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:51,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845191773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:51,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845191773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:51,776 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:51,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845191774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:51,777 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412105a97a754dcfc4c899a4ad331fdef6cbe_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412105a97a754dcfc4c899a4ad331fdef6cbe_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:51,778 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/649184c841e048eeaf32f7766d756c71, store: [table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:51,779 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/649184c841e048eeaf32f7766d756c71 is 175, key is test_row_0/A:col10/1733845130638/Put/seqid=0 2024-12-10T15:38:51,779 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:51,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39556 deadline: 1733845191778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:51,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-10T15:38:51,785 INFO [Thread-2652 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 161 completed 2024-12-10T15:38:51,786 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:38:51,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-12-10T15:38:51,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-10T15:38:51,792 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:38:51,792 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:38:51,792 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:38:51,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742469_1645 (size=39749) 2024-12-10T15:38:51,805 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=180, memsize=38.0 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/649184c841e048eeaf32f7766d756c71 2024-12-10T15:38:51,812 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/88cbfbb3240f4b7bba2b48be9b02951b is 50, key is test_row_0/B:col10/1733845130638/Put/seqid=0 2024-12-10T15:38:51,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742470_1646 (size=12151) 2024-12-10T15:38:51,837 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=180 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/88cbfbb3240f4b7bba2b48be9b02951b 2024-12-10T15:38:51,842 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/0b7beb0246fd46fe9f9c33213de15803 is 50, key is test_row_0/C:col10/1733845130638/Put/seqid=0 2024-12-10T15:38:51,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742471_1647 (size=12151) 2024-12-10T15:38:51,866 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=180 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/0b7beb0246fd46fe9f9c33213de15803 2024-12-10T15:38:51,878 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:51,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845191877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:51,878 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:51,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845191877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:51,882 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:51,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845191879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:51,882 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/649184c841e048eeaf32f7766d756c71 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/649184c841e048eeaf32f7766d756c71 2024-12-10T15:38:51,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-10T15:38:51,892 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/649184c841e048eeaf32f7766d756c71, entries=200, sequenceid=180, filesize=38.8 K 2024-12-10T15:38:51,893 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/88cbfbb3240f4b7bba2b48be9b02951b as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/88cbfbb3240f4b7bba2b48be9b02951b 2024-12-10T15:38:51,896 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/88cbfbb3240f4b7bba2b48be9b02951b, entries=150, sequenceid=180, filesize=11.9 K 2024-12-10T15:38:51,897 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/0b7beb0246fd46fe9f9c33213de15803 as 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/0b7beb0246fd46fe9f9c33213de15803 2024-12-10T15:38:51,899 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/0b7beb0246fd46fe9f9c33213de15803, entries=150, sequenceid=180, filesize=11.9 K 2024-12-10T15:38:51,900 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for f84cee47e13bc12ff2ef81f5e007a839 in 139ms, sequenceid=180, compaction requested=true 2024-12-10T15:38:51,900 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:51,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:38:51,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:51,900 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:51,900 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:51,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:38:51,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:51,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:38:51,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:51,901 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:51,901 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/B is initiating minor compaction (all files) 2024-12-10T15:38:51,901 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102267 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:51,901 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/A is initiating minor compaction (all files) 2024-12-10T15:38:51,901 INFO 
[RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/B in TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:51,901 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/A in TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:51,901 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/8ed75e686dab4566aa8e2945681c64b4, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/3adc69728e5b4cf3a81c995ba00efa09, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/88cbfbb3240f4b7bba2b48be9b02951b] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=35.9 K 2024-12-10T15:38:51,901 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/b016ec6dc741418ab42e9c52411e19e8, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/51169a5260ff458c87685887e99624d0, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/649184c841e048eeaf32f7766d756c71] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=99.9 K 2024-12-10T15:38:51,901 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:51,901 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
files: [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/b016ec6dc741418ab42e9c52411e19e8, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/51169a5260ff458c87685887e99624d0, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/649184c841e048eeaf32f7766d756c71] 2024-12-10T15:38:51,901 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ed75e686dab4566aa8e2945681c64b4, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1733845129016 2024-12-10T15:38:51,901 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting b016ec6dc741418ab42e9c52411e19e8, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1733845129016 2024-12-10T15:38:51,901 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 3adc69728e5b4cf3a81c995ba00efa09, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733845129492 2024-12-10T15:38:51,901 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 51169a5260ff458c87685887e99624d0, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733845129492 2024-12-10T15:38:51,902 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 649184c841e048eeaf32f7766d756c71, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733845130634 2024-12-10T15:38:51,902 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 88cbfbb3240f4b7bba2b48be9b02951b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733845130638 2024-12-10T15:38:51,907 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:51,908 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f84cee47e13bc12ff2ef81f5e007a839#B#compaction#551 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:51,908 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/1dc70abfa7244ae7878167fb2f430f80 is 50, key is test_row_0/B:col10/1733845130638/Put/seqid=0 2024-12-10T15:38:51,912 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210d779371532994fab84c942461c9ae870_f84cee47e13bc12ff2ef81f5e007a839 store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:51,913 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210d779371532994fab84c942461c9ae870_f84cee47e13bc12ff2ef81f5e007a839, store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:51,913 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210d779371532994fab84c942461c9ae870_f84cee47e13bc12ff2ef81f5e007a839 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:51,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742472_1648 (size=12561) 2024-12-10T15:38:51,942 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/1dc70abfa7244ae7878167fb2f430f80 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/1dc70abfa7244ae7878167fb2f430f80 2024-12-10T15:38:51,943 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:51,943 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-10T15:38:51,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:38:51,943 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing f84cee47e13bc12ff2ef81f5e007a839 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-10T15:38:51,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=A 2024-12-10T15:38:51,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:51,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=B 2024-12-10T15:38:51,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:51,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=C 2024-12-10T15:38:51,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:51,947 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/B of f84cee47e13bc12ff2ef81f5e007a839 into 1dc70abfa7244ae7878167fb2f430f80(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:38:51,947 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:51,947 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/B, priority=13, startTime=1733845131900; duration=0sec 2024-12-10T15:38:51,947 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:51,947 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:B 2024-12-10T15:38:51,947 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:51,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210801ce79456024b86b5930ce5fb18bdcf_f84cee47e13bc12ff2ef81f5e007a839 is 50, key is test_row_0/A:col10/1733845131765/Put/seqid=0 2024-12-10T15:38:51,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742473_1649 (size=4469) 2024-12-10T15:38:51,951 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f84cee47e13bc12ff2ef81f5e007a839#A#compaction#552 average throughput is 0.56 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:51,951 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/cd5028483e0f4f56baa34a9ec3f27614 is 175, key is test_row_0/A:col10/1733845130638/Put/seqid=0 2024-12-10T15:38:51,951 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:51,952 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/C is initiating minor compaction (all files) 2024-12-10T15:38:51,952 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/C in TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:38:51,952 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/1cb6e26af32c4f7cb6df852a6f0494ac, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/e87df0ff98034ac39836791624754c89, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/0b7beb0246fd46fe9f9c33213de15803] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=35.9 K 2024-12-10T15:38:51,952 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 1cb6e26af32c4f7cb6df852a6f0494ac, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1733845129016 2024-12-10T15:38:51,953 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting e87df0ff98034ac39836791624754c89, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733845129492 2024-12-10T15:38:51,953 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b7beb0246fd46fe9f9c33213de15803, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733845130638 2024-12-10T15:38:51,959 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f84cee47e13bc12ff2ef81f5e007a839#C#compaction#554 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:51,959 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/940fc0445ead4740b88392a50cb2722f is 50, key is test_row_0/C:col10/1733845130638/Put/seqid=0 2024-12-10T15:38:51,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742475_1651 (size=31515) 2024-12-10T15:38:51,964 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/cd5028483e0f4f56baa34a9ec3f27614 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/cd5028483e0f4f56baa34a9ec3f27614 2024-12-10T15:38:51,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742476_1652 (size=12561) 2024-12-10T15:38:51,968 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/A of f84cee47e13bc12ff2ef81f5e007a839 into cd5028483e0f4f56baa34a9ec3f27614(size=30.8 K), total size for store is 30.8 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:38:51,968 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:51,968 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/A, priority=13, startTime=1733845131900; duration=0sec 2024-12-10T15:38:51,968 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:51,969 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:A 2024-12-10T15:38:51,970 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/940fc0445ead4740b88392a50cb2722f as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/940fc0445ead4740b88392a50cb2722f 2024-12-10T15:38:51,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742474_1650 (size=12304) 2024-12-10T15:38:51,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:51,976 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/C of f84cee47e13bc12ff2ef81f5e007a839 into 940fc0445ead4740b88392a50cb2722f(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:38:51,976 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:51,976 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/C, priority=13, startTime=1733845131900; duration=0sec 2024-12-10T15:38:51,976 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:51,976 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:C 2024-12-10T15:38:51,976 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210801ce79456024b86b5930ce5fb18bdcf_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210801ce79456024b86b5930ce5fb18bdcf_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:51,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/80decb51f3ee455c80dcd1ab5e17a24b, store: [table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:51,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/80decb51f3ee455c80dcd1ab5e17a24b is 175, key is test_row_0/A:col10/1733845131765/Put/seqid=0 2024-12-10T15:38:52,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742477_1653 (size=31105) 2024-12-10T15:38:52,005 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=196, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/80decb51f3ee455c80dcd1ab5e17a24b 2024-12-10T15:38:52,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/a2b60b1eb3394124abf6146d4235dffc is 50, key is test_row_0/B:col10/1733845131765/Put/seqid=0 2024-12-10T15:38:52,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46053 is added to blk_1073742478_1654 (size=12151) 2024-12-10T15:38:52,081 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:52,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:52,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-10T15:38:52,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:52,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845192101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:52,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:52,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845192104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:52,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:52,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845192104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:52,207 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:52,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845192205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:52,208 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:52,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845192207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:52,216 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:52,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845192215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:52,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-10T15:38:52,412 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:52,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845192412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:52,416 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:52,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845192415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:52,421 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:52,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845192419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:52,475 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/a2b60b1eb3394124abf6146d4235dffc 2024-12-10T15:38:52,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/2bbd6ec28735420a90144c45deeadd03 is 50, key is test_row_0/C:col10/1733845131765/Put/seqid=0 2024-12-10T15:38:52,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742479_1655 (size=12151) 2024-12-10T15:38:52,532 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/2bbd6ec28735420a90144c45deeadd03 2024-12-10T15:38:52,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/80decb51f3ee455c80dcd1ab5e17a24b as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/80decb51f3ee455c80dcd1ab5e17a24b 2024-12-10T15:38:52,561 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/80decb51f3ee455c80dcd1ab5e17a24b, entries=150, sequenceid=196, filesize=30.4 K 2024-12-10T15:38:52,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/a2b60b1eb3394124abf6146d4235dffc as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/a2b60b1eb3394124abf6146d4235dffc 2024-12-10T15:38:52,566 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/a2b60b1eb3394124abf6146d4235dffc, entries=150, sequenceid=196, filesize=11.9 K 2024-12-10T15:38:52,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/2bbd6ec28735420a90144c45deeadd03 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/2bbd6ec28735420a90144c45deeadd03 2024-12-10T15:38:52,570 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/2bbd6ec28735420a90144c45deeadd03, entries=150, sequenceid=196, filesize=11.9 K 2024-12-10T15:38:52,570 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for f84cee47e13bc12ff2ef81f5e007a839 in 627ms, sequenceid=196, compaction requested=false 2024-12-10T15:38:52,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:52,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:38:52,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-12-10T15:38:52,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=164 2024-12-10T15:38:52,576 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-12-10T15:38:52,576 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 780 msec 2024-12-10T15:38:52,578 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 791 msec 2024-12-10T15:38:52,724 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f84cee47e13bc12ff2ef81f5e007a839 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-10T15:38:52,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=A 2024-12-10T15:38:52,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:52,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=B 2024-12-10T15:38:52,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:52,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=C 2024-12-10T15:38:52,724 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:52,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:52,738 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121009c0c4e39d57484b82512dbe403dc29a_f84cee47e13bc12ff2ef81f5e007a839 is 50, key is test_row_0/A:col10/1733845132101/Put/seqid=0 2024-12-10T15:38:52,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742480_1656 (size=12304) 2024-12-10T15:38:52,786 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:52,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845192783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:52,787 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:52,787 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:52,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845192784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:52,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845192784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:52,800 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:52,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39556 deadline: 1733845192800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:52,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:52,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845192887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:52,892 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:52,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845192891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:52,893 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:52,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845192891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:52,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-10T15:38:52,904 INFO [Thread-2652 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-12-10T15:38:52,908 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:38:52,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-12-10T15:38:52,909 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:38:52,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-10T15:38:52,910 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:38:52,910 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:38:53,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-10T15:38:53,061 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:53,062 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-10T15:38:53,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:38:53,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:53,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:53,062 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:53,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:38:53,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:53,098 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:53,099 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:53,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845193095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:53,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845193095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:53,100 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:53,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845193097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:53,157 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:53,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39540 deadline: 1733845193155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:53,158 DEBUG [Thread-2644 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4142 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., hostname=bf0fec90ff6d,46239,1733844953049, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T15:38:53,160 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:53,171 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121009c0c4e39d57484b82512dbe403dc29a_f84cee47e13bc12ff2ef81f5e007a839 to 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121009c0c4e39d57484b82512dbe403dc29a_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:53,177 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/269ca785173342e48780004d4f0ddaea, store: [table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:53,178 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/269ca785173342e48780004d4f0ddaea is 175, key is test_row_0/A:col10/1733845132101/Put/seqid=0 2024-12-10T15:38:53,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742481_1657 (size=31105) 2024-12-10T15:38:53,200 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=221, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/269ca785173342e48780004d4f0ddaea 2024-12-10T15:38:53,206 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/6a23261a6e204611a6a333736a9b8b0c is 50, key is test_row_0/B:col10/1733845132101/Put/seqid=0 2024-12-10T15:38:53,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-10T15:38:53,219 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:53,223 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-10T15:38:53,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:53,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:53,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:38:53,224 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:53,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:53,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:53,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742482_1658 (size=12151) 2024-12-10T15:38:53,376 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:53,379 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-10T15:38:53,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:53,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:53,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:53,379 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:53,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:53,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:53,401 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:53,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845193400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:53,402 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:53,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845193401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:53,408 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:53,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845193406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:53,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-10T15:38:53,535 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:53,540 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-10T15:38:53,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:53,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:53,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:53,540 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:53,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:53,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:53,635 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/6a23261a6e204611a6a333736a9b8b0c 2024-12-10T15:38:53,653 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/52637347331341e98b9f7c074c8bf1c2 is 50, key is test_row_0/C:col10/1733845132101/Put/seqid=0 2024-12-10T15:38:53,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742483_1659 (size=12151) 2024-12-10T15:38:53,681 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/52637347331341e98b9f7c074c8bf1c2 2024-12-10T15:38:53,692 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:53,695 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-10T15:38:53,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:53,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:53,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:53,696 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:53,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:53,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:53,707 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/269ca785173342e48780004d4f0ddaea as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/269ca785173342e48780004d4f0ddaea 2024-12-10T15:38:53,717 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/269ca785173342e48780004d4f0ddaea, entries=150, sequenceid=221, filesize=30.4 K 2024-12-10T15:38:53,721 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/6a23261a6e204611a6a333736a9b8b0c as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/6a23261a6e204611a6a333736a9b8b0c 2024-12-10T15:38:53,724 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/6a23261a6e204611a6a333736a9b8b0c, entries=150, sequenceid=221, filesize=11.9 K 2024-12-10T15:38:53,724 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/52637347331341e98b9f7c074c8bf1c2 as 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/52637347331341e98b9f7c074c8bf1c2 2024-12-10T15:38:53,728 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/52637347331341e98b9f7c074c8bf1c2, entries=150, sequenceid=221, filesize=11.9 K 2024-12-10T15:38:53,732 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for f84cee47e13bc12ff2ef81f5e007a839 in 1007ms, sequenceid=221, compaction requested=true 2024-12-10T15:38:53,732 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:53,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:38:53,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:53,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:38:53,732 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:53,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:53,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:38:53,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-10T15:38:53,734 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:53,734 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93725 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:53,734 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/A is initiating minor compaction (all files) 2024-12-10T15:38:53,734 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/A in TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:38:53,734 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/cd5028483e0f4f56baa34a9ec3f27614, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/80decb51f3ee455c80dcd1ab5e17a24b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/269ca785173342e48780004d4f0ddaea] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=91.5 K 2024-12-10T15:38:53,734 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:53,734 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. files: [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/cd5028483e0f4f56baa34a9ec3f27614, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/80decb51f3ee455c80dcd1ab5e17a24b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/269ca785173342e48780004d4f0ddaea] 2024-12-10T15:38:53,735 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd5028483e0f4f56baa34a9ec3f27614, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733845130638 2024-12-10T15:38:53,735 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 80decb51f3ee455c80dcd1ab5e17a24b, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733845131765 2024-12-10T15:38:53,735 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 269ca785173342e48780004d4f0ddaea, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1733845132094 2024-12-10T15:38:53,739 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:53,742 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210b424ab6a26974806a4562ebf5d950572_f84cee47e13bc12ff2ef81f5e007a839 store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:53,743 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] 
mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210b424ab6a26974806a4562ebf5d950572_f84cee47e13bc12ff2ef81f5e007a839, store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:53,744 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210b424ab6a26974806a4562ebf5d950572_f84cee47e13bc12ff2ef81f5e007a839 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:53,747 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:53,747 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/B is initiating minor compaction (all files) 2024-12-10T15:38:53,747 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/B in TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:53,748 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/1dc70abfa7244ae7878167fb2f430f80, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/a2b60b1eb3394124abf6146d4235dffc, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/6a23261a6e204611a6a333736a9b8b0c] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=36.0 K 2024-12-10T15:38:53,748 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 1dc70abfa7244ae7878167fb2f430f80, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733845130638 2024-12-10T15:38:53,748 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting a2b60b1eb3394124abf6146d4235dffc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733845131765 2024-12-10T15:38:53,748 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 6a23261a6e204611a6a333736a9b8b0c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1733845132094 2024-12-10T15:38:53,765 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f84cee47e13bc12ff2ef81f5e007a839#B#compaction#561 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:53,765 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/a1fd7a137e3b413fb5e98362bd79e313 is 50, key is test_row_0/B:col10/1733845132101/Put/seqid=0 2024-12-10T15:38:53,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742484_1660 (size=4469) 2024-12-10T15:38:53,780 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f84cee47e13bc12ff2ef81f5e007a839#A#compaction#560 average throughput is 0.60 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:53,781 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/ac1a895025c4462ba8b75688b419abb6 is 175, key is test_row_0/A:col10/1733845132101/Put/seqid=0 2024-12-10T15:38:53,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742485_1661 (size=12663) 2024-12-10T15:38:53,797 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/a1fd7a137e3b413fb5e98362bd79e313 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/a1fd7a137e3b413fb5e98362bd79e313 2024-12-10T15:38:53,801 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/B of f84cee47e13bc12ff2ef81f5e007a839 into a1fd7a137e3b413fb5e98362bd79e313(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:38:53,801 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:53,801 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/B, priority=13, startTime=1733845133732; duration=0sec 2024-12-10T15:38:53,801 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:53,801 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:B 2024-12-10T15:38:53,802 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:53,806 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:53,806 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/C is initiating minor compaction (all files) 2024-12-10T15:38:53,806 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/C in TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:53,806 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/940fc0445ead4740b88392a50cb2722f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/2bbd6ec28735420a90144c45deeadd03, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/52637347331341e98b9f7c074c8bf1c2] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=36.0 K 2024-12-10T15:38:53,807 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 940fc0445ead4740b88392a50cb2722f, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733845130638 2024-12-10T15:38:53,807 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 2bbd6ec28735420a90144c45deeadd03, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733845131765 2024-12-10T15:38:53,807 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 52637347331341e98b9f7c074c8bf1c2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1733845132094 2024-12-10T15:38:53,814 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
f84cee47e13bc12ff2ef81f5e007a839#C#compaction#562 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:53,814 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/c9b499b7e4d54b1b882422a4b51720a0 is 50, key is test_row_0/C:col10/1733845132101/Put/seqid=0 2024-12-10T15:38:53,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742487_1663 (size=12663) 2024-12-10T15:38:53,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742486_1662 (size=31617) 2024-12-10T15:38:53,825 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/c9b499b7e4d54b1b882422a4b51720a0 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/c9b499b7e4d54b1b882422a4b51720a0 2024-12-10T15:38:53,830 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/C of f84cee47e13bc12ff2ef81f5e007a839 into c9b499b7e4d54b1b882422a4b51720a0(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:38:53,830 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:53,830 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/C, priority=13, startTime=1733845133732; duration=0sec 2024-12-10T15:38:53,830 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:53,830 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:C 2024-12-10T15:38:53,857 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:53,857 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-10T15:38:53,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:38:53,858 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing f84cee47e13bc12ff2ef81f5e007a839 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-10T15:38:53,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=A 2024-12-10T15:38:53,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:53,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=B 2024-12-10T15:38:53,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:53,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=C 2024-12-10T15:38:53,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:53,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210c9ef9246e8ee46eebb7af1ebabd2c8cc_f84cee47e13bc12ff2ef81f5e007a839 is 50, key is test_row_0/A:col10/1733845132783/Put/seqid=0 2024-12-10T15:38:53,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742488_1664 (size=12304) 2024-12-10T15:38:53,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:53,887 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210c9ef9246e8ee46eebb7af1ebabd2c8cc_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210c9ef9246e8ee46eebb7af1ebabd2c8cc_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:53,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/f6b7d770ec3549d4a30a650d4753e350, store: [table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:53,889 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/f6b7d770ec3549d4a30a650d4753e350 is 175, key is test_row_0/A:col10/1733845132783/Put/seqid=0 2024-12-10T15:38:53,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:53,904 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:53,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742489_1665 (size=31105) 2024-12-10T15:38:53,930 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:53,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845193927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:53,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:53,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845193930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:53,933 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:53,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845193930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:54,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-10T15:38:54,033 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:54,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845194031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:54,034 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:54,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845194033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:54,035 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:54,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845194034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:54,236 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/ac1a895025c4462ba8b75688b419abb6 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/ac1a895025c4462ba8b75688b419abb6 2024-12-10T15:38:54,237 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:54,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845194235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:54,238 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:54,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845194235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:54,240 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/A of f84cee47e13bc12ff2ef81f5e007a839 into ac1a895025c4462ba8b75688b419abb6(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:38:54,240 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:54,240 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/A, priority=13, startTime=1733845133732; duration=0sec 2024-12-10T15:38:54,240 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:54,240 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:A 2024-12-10T15:38:54,244 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:54,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845194242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:54,308 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=237, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/f6b7d770ec3549d4a30a650d4753e350 2024-12-10T15:38:54,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/ba497316a0a3437b81e4d3edfbfdbb70 is 50, key is test_row_0/B:col10/1733845132783/Put/seqid=0 2024-12-10T15:38:54,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742490_1666 (size=12151) 2024-12-10T15:38:54,540 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:54,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845194538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:54,540 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:54,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845194539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:54,547 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:54,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845194546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:54,785 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/ba497316a0a3437b81e4d3edfbfdbb70 2024-12-10T15:38:54,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/f3aa33eec8fd47d891de68191b2b9e48 is 50, key is test_row_0/C:col10/1733845132783/Put/seqid=0 2024-12-10T15:38:54,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742491_1667 (size=12151) 2024-12-10T15:38:54,804 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/f3aa33eec8fd47d891de68191b2b9e48 2024-12-10T15:38:54,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/f6b7d770ec3549d4a30a650d4753e350 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/f6b7d770ec3549d4a30a650d4753e350 2024-12-10T15:38:54,816 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/f6b7d770ec3549d4a30a650d4753e350, entries=150, sequenceid=237, filesize=30.4 K 2024-12-10T15:38:54,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/ba497316a0a3437b81e4d3edfbfdbb70 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/ba497316a0a3437b81e4d3edfbfdbb70 2024-12-10T15:38:54,818 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:54,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39556 deadline: 1733845194817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:54,820 DEBUG [Thread-2642 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4174 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., hostname=bf0fec90ff6d,46239,1733844953049, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at 
org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T15:38:54,821 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/ba497316a0a3437b81e4d3edfbfdbb70, entries=150, sequenceid=237, filesize=11.9 K 2024-12-10T15:38:54,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/f3aa33eec8fd47d891de68191b2b9e48 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/f3aa33eec8fd47d891de68191b2b9e48 2024-12-10T15:38:54,826 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/f3aa33eec8fd47d891de68191b2b9e48, entries=150, sequenceid=237, filesize=11.9 K 2024-12-10T15:38:54,827 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for f84cee47e13bc12ff2ef81f5e007a839 in 970ms, sequenceid=237, compaction requested=false 2024-12-10T15:38:54,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:54,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
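The repeated RegionTooBusyException entries above are retryable pushback rather than hard failures: the client-side RpcRetryingCallerImpl entry ("tries=6, retries=16, started=4174 ms ago") shows HBase's built-in retry loop absorbing them while the flush drains the memstore. A minimal sketch of a writer relying on that loop follows; it is not part of the test tool, the row, family and value names are assumptions borrowed from the log, and the retry settings only mirror what the log reports.

// Illustrative only: a single put against the test table with the client retry budget made explicit.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 16); // matches retries=16 in the log above
    conf.setLong("hbase.client.pause", 100);        // base backoff between retries, in ms
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_2"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        // RegionTooBusyException is retried internally, with backoff, up to the configured
        // number of retries before the put is surfaced to the caller as a failure.
        table.put(put);
      } catch (IOException e) {
        // Reached only after the retry budget is spent; the region-busy cause is typically
        // in the exception's cause chain. Back off and retry later at the application level.
        System.err.println("Put failed after retries: " + e.getMessage());
      }
    }
  }
}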
2024-12-10T15:38:54,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-12-10T15:38:54,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=166 2024-12-10T15:38:54,828 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-12-10T15:38:54,829 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9170 sec 2024-12-10T15:38:54,829 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 1.9210 sec 2024-12-10T15:38:55,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-10T15:38:55,016 INFO [Thread-2652 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-12-10T15:38:55,016 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:38:55,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees 2024-12-10T15:38:55,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-10T15:38:55,023 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:38:55,027 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:38:55,027 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:38:55,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:55,046 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f84cee47e13bc12ff2ef81f5e007a839 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-10T15:38:55,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=A 2024-12-10T15:38:55,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:55,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=B 2024-12-10T15:38:55,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-12-10T15:38:55,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=C 2024-12-10T15:38:55,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:55,065 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412105b6b67a0f7cf43d495235d66a2c63615_f84cee47e13bc12ff2ef81f5e007a839 is 50, key is test_row_0/A:col10/1733845133926/Put/seqid=0 2024-12-10T15:38:55,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:55,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845195072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:55,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:55,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845195072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:55,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:55,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845195072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:55,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742492_1668 (size=12404) 2024-12-10T15:38:55,079 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:55,082 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412105b6b67a0f7cf43d495235d66a2c63615_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412105b6b67a0f7cf43d495235d66a2c63615_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:55,083 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/b4fe46727bd34ab3960095f8498bcd03, store: [table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:55,084 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/b4fe46727bd34ab3960095f8498bcd03 is 175, key is test_row_0/A:col10/1733845133926/Put/seqid=0 2024-12-10T15:38:55,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742493_1669 (size=31205) 2024-12-10T15:38:55,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-10T15:38:55,176 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:55,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845195175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:55,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:55,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845195175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:55,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:55,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845195175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:55,180 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:55,180 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-10T15:38:55,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:55,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:55,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:55,180 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:55,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:55,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:55,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-10T15:38:55,332 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:55,332 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-10T15:38:55,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:55,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:55,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:55,333 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:55,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:55,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:55,379 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:55,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845195377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:55,379 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:55,379 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:55,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845195377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:55,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845195377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:55,484 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:55,485 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-10T15:38:55,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:55,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:55,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:55,485 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:55,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:55,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:55,487 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=261, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/b4fe46727bd34ab3960095f8498bcd03 2024-12-10T15:38:55,494 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/3413358f132c49589fad32031b166eae is 50, key is test_row_0/B:col10/1733845133926/Put/seqid=0 2024-12-10T15:38:55,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742494_1670 (size=12251) 2024-12-10T15:38:55,502 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/3413358f132c49589fad32031b166eae 2024-12-10T15:38:55,511 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/6a39ce8c502f46eaaaeee675d1909c27 is 50, key is test_row_0/C:col10/1733845133926/Put/seqid=0 2024-12-10T15:38:55,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742495_1671 (size=12251) 2024-12-10T15:38:55,531 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/6a39ce8c502f46eaaaeee675d1909c27 2024-12-10T15:38:55,558 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/b4fe46727bd34ab3960095f8498bcd03 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/b4fe46727bd34ab3960095f8498bcd03 2024-12-10T15:38:55,564 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/b4fe46727bd34ab3960095f8498bcd03, entries=150, sequenceid=261, filesize=30.5 K 2024-12-10T15:38:55,565 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/3413358f132c49589fad32031b166eae as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/3413358f132c49589fad32031b166eae 2024-12-10T15:38:55,568 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/3413358f132c49589fad32031b166eae, entries=150, sequenceid=261, filesize=12.0 K 2024-12-10T15:38:55,569 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/6a39ce8c502f46eaaaeee675d1909c27 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/6a39ce8c502f46eaaaeee675d1909c27 2024-12-10T15:38:55,573 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/6a39ce8c502f46eaaaeee675d1909c27, entries=150, sequenceid=261, filesize=12.0 K 2024-12-10T15:38:55,574 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for f84cee47e13bc12ff2ef81f5e007a839 in 527ms, sequenceid=261, compaction requested=true 2024-12-10T15:38:55,574 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:55,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:38:55,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:55,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:38:55,574 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:55,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:55,574 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:55,574 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:38:55,574 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:55,575 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93927 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:55,575 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/A is initiating minor compaction (all files) 2024-12-10T15:38:55,575 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37065 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:55,575 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/A in TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:55,575 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/B is initiating minor compaction (all files) 2024-12-10T15:38:55,575 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/B in TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:55,575 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/ac1a895025c4462ba8b75688b419abb6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/f6b7d770ec3549d4a30a650d4753e350, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/b4fe46727bd34ab3960095f8498bcd03] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=91.7 K 2024-12-10T15:38:55,575 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/a1fd7a137e3b413fb5e98362bd79e313, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/ba497316a0a3437b81e4d3edfbfdbb70, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/3413358f132c49589fad32031b166eae] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=36.2 K 2024-12-10T15:38:55,575 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] 
mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:55,575 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. files: [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/ac1a895025c4462ba8b75688b419abb6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/f6b7d770ec3549d4a30a650d4753e350, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/b4fe46727bd34ab3960095f8498bcd03] 2024-12-10T15:38:55,575 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting a1fd7a137e3b413fb5e98362bd79e313, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1733845132094 2024-12-10T15:38:55,575 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting ac1a895025c4462ba8b75688b419abb6, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1733845132094 2024-12-10T15:38:55,575 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting ba497316a0a3437b81e4d3edfbfdbb70, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733845132783 2024-12-10T15:38:55,575 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting f6b7d770ec3549d4a30a650d4753e350, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733845132783 2024-12-10T15:38:55,576 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 3413358f132c49589fad32031b166eae, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1733845133926 2024-12-10T15:38:55,576 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting b4fe46727bd34ab3960095f8498bcd03, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1733845133926 2024-12-10T15:38:55,582 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:55,583 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f84cee47e13bc12ff2ef81f5e007a839#B#compaction#569 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:55,583 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/f2ca0a693b6d4e0080b14fcdde38452c is 50, key is test_row_0/B:col10/1733845133926/Put/seqid=0 2024-12-10T15:38:55,584 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412103c49ec420d254e2a87b64f6bff0accdb_f84cee47e13bc12ff2ef81f5e007a839 store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:55,586 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412103c49ec420d254e2a87b64f6bff0accdb_f84cee47e13bc12ff2ef81f5e007a839, store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:55,586 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412103c49ec420d254e2a87b64f6bff0accdb_f84cee47e13bc12ff2ef81f5e007a839 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:55,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742497_1673 (size=12865) 2024-12-10T15:38:55,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742496_1672 (size=4469) 2024-12-10T15:38:55,612 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f84cee47e13bc12ff2ef81f5e007a839#A#compaction#570 average throughput is 0.81 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:55,613 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/829f1dafc107466eb0680428b2944d0a is 175, key is test_row_0/A:col10/1733845133926/Put/seqid=0 2024-12-10T15:38:55,616 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/f2ca0a693b6d4e0080b14fcdde38452c as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/f2ca0a693b6d4e0080b14fcdde38452c 2024-12-10T15:38:55,621 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/B of f84cee47e13bc12ff2ef81f5e007a839 into f2ca0a693b6d4e0080b14fcdde38452c(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:38:55,621 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:55,621 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/B, priority=13, startTime=1733845135574; duration=0sec 2024-12-10T15:38:55,621 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:55,621 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:B 2024-12-10T15:38:55,621 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:55,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-10T15:38:55,622 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37065 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:55,623 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/C is initiating minor compaction (all files) 2024-12-10T15:38:55,623 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/C in TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:38:55,623 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/c9b499b7e4d54b1b882422a4b51720a0, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/f3aa33eec8fd47d891de68191b2b9e48, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/6a39ce8c502f46eaaaeee675d1909c27] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=36.2 K 2024-12-10T15:38:55,623 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting c9b499b7e4d54b1b882422a4b51720a0, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1733845132094 2024-12-10T15:38:55,623 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting f3aa33eec8fd47d891de68191b2b9e48, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733845132783 2024-12-10T15:38:55,624 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 6a39ce8c502f46eaaaeee675d1909c27, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1733845133926 2024-12-10T15:38:55,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742498_1674 (size=31819) 2024-12-10T15:38:55,636 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f84cee47e13bc12ff2ef81f5e007a839#C#compaction#571 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:55,637 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/8d501055a40c44eeac71a0065c3ee630 is 50, key is test_row_0/C:col10/1733845133926/Put/seqid=0 2024-12-10T15:38:55,637 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:55,637 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-10T15:38:55,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:38:55,637 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2837): Flushing f84cee47e13bc12ff2ef81f5e007a839 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-10T15:38:55,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=A 2024-12-10T15:38:55,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:55,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=B 2024-12-10T15:38:55,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:55,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=C 2024-12-10T15:38:55,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:55,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210267f6c83d2b840b5a93ac96c66bef635_f84cee47e13bc12ff2ef81f5e007a839 is 50, key is test_row_0/A:col10/1733845135060/Put/seqid=0 2024-12-10T15:38:55,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742499_1675 (size=12865) 2024-12-10T15:38:55,678 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/8d501055a40c44eeac71a0065c3ee630 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/8d501055a40c44eeac71a0065c3ee630 2024-12-10T15:38:55,681 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/C of f84cee47e13bc12ff2ef81f5e007a839 into 8d501055a40c44eeac71a0065c3ee630(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:38:55,681 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:55,682 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/C, priority=13, startTime=1733845135574; duration=0sec 2024-12-10T15:38:55,682 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:55,682 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:C 2024-12-10T15:38:55,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:55,683 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:55,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742500_1676 (size=12454) 2024-12-10T15:38:55,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:55,686 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210267f6c83d2b840b5a93ac96c66bef635_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210267f6c83d2b840b5a93ac96c66bef635_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:55,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/55d22c14640149848a7a1311b882a3c6, store: [table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:55,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/55d22c14640149848a7a1311b882a3c6 is 175, key is test_row_0/A:col10/1733845135060/Put/seqid=0 2024-12-10T15:38:55,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742501_1677 (size=31255) 2024-12-10T15:38:55,692 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(147): 
Mob store is flushed, sequenceid=275, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/55d22c14640149848a7a1311b882a3c6 2024-12-10T15:38:55,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/8ca8dbb97d834dc49b7b2e673329a10b is 50, key is test_row_0/B:col10/1733845135060/Put/seqid=0 2024-12-10T15:38:55,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742502_1678 (size=12301) 2024-12-10T15:38:55,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:55,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845195702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:55,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:55,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845195702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:55,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:55,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845195702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:55,805 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:55,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845195805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:55,806 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:55,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845195805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:55,806 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:55,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845195805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:56,009 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:56,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845196008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:56,009 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:56,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845196008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:56,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:56,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845196009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:56,036 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/829f1dafc107466eb0680428b2944d0a as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/829f1dafc107466eb0680428b2944d0a 2024-12-10T15:38:56,040 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/A of f84cee47e13bc12ff2ef81f5e007a839 into 829f1dafc107466eb0680428b2944d0a(size=31.1 K), total size for store is 31.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:38:56,040 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:56,040 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/A, priority=13, startTime=1733845135574; duration=0sec 2024-12-10T15:38:56,040 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:56,040 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:A 2024-12-10T15:38:56,100 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/8ca8dbb97d834dc49b7b2e673329a10b 2024-12-10T15:38:56,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/a2f4332139414f69877fcc31699fb821 is 50, key is test_row_0/C:col10/1733845135060/Put/seqid=0 2024-12-10T15:38:56,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742503_1679 (size=12301) 2024-12-10T15:38:56,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-10T15:38:56,311 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:56,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845196310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:56,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:56,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845196310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:56,312 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:56,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845196311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:56,518 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/a2f4332139414f69877fcc31699fb821 2024-12-10T15:38:56,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/55d22c14640149848a7a1311b882a3c6 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/55d22c14640149848a7a1311b882a3c6 2024-12-10T15:38:56,526 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/55d22c14640149848a7a1311b882a3c6, entries=150, sequenceid=275, filesize=30.5 K 2024-12-10T15:38:56,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/8ca8dbb97d834dc49b7b2e673329a10b as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/8ca8dbb97d834dc49b7b2e673329a10b 2024-12-10T15:38:56,530 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/8ca8dbb97d834dc49b7b2e673329a10b, entries=150, sequenceid=275, filesize=12.0 K 2024-12-10T15:38:56,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/a2f4332139414f69877fcc31699fb821 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/a2f4332139414f69877fcc31699fb821 2024-12-10T15:38:56,534 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/a2f4332139414f69877fcc31699fb821, entries=150, sequenceid=275, filesize=12.0 K 2024-12-10T15:38:56,534 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for f84cee47e13bc12ff2ef81f5e007a839 in 897ms, sequenceid=275, compaction requested=false 2024-12-10T15:38:56,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:56,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:38:56,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-12-10T15:38:56,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-12-10T15:38:56,537 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-12-10T15:38:56,537 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5080 sec 2024-12-10T15:38:56,537 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees in 1.5200 sec 2024-12-10T15:38:56,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:56,815 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f84cee47e13bc12ff2ef81f5e007a839 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-10T15:38:56,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=A 2024-12-10T15:38:56,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:56,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=B 2024-12-10T15:38:56,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:56,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=C 2024-12-10T15:38:56,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:56,820 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210d66dfad9002a42ff8c8edac40b07ad02_f84cee47e13bc12ff2ef81f5e007a839 is 50, key is test_row_0/A:col10/1733845136814/Put/seqid=0 2024-12-10T15:38:56,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:56,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845196827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:56,830 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:56,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845196828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:56,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:56,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845196835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:56,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742504_1680 (size=12454) 2024-12-10T15:38:56,932 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:56,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845196931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:56,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:56,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845196938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:56,939 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:56,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845196939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:57,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-10T15:38:57,123 INFO [Thread-2652 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-12-10T15:38:57,124 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:38:57,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees 2024-12-10T15:38:57,125 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:38:57,125 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:38:57,126 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:38:57,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-10T15:38:57,134 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:57,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845197133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:57,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:57,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845197140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:57,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:57,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845197144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:57,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:57,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39540 deadline: 1733845197171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:57,171 DEBUG [Thread-2644 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8155 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., hostname=bf0fec90ff6d,46239,1733844953049, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T15:38:57,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-10T15:38:57,239 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,242 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210d66dfad9002a42ff8c8edac40b07ad02_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210d66dfad9002a42ff8c8edac40b07ad02_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:57,243 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/95fc47fdd28f4913a7afdb2af003aea4, store: [table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:57,243 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/95fc47fdd28f4913a7afdb2af003aea4 is 175, key is test_row_0/A:col10/1733845136814/Put/seqid=0 2024-12-10T15:38:57,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742505_1681 (size=31255) 2024-12-10T15:38:57,248 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=301, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/95fc47fdd28f4913a7afdb2af003aea4 2024-12-10T15:38:57,254 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/0372762de69f4d3ea7f6efbc8f0fcd44 is 50, key is test_row_0/B:col10/1733845136814/Put/seqid=0 2024-12-10T15:38:57,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742506_1682 (size=12301) 2024-12-10T15:38:57,277 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:57,277 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-10T15:38:57,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:57,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:57,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
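(Editor's aside, not part of the captured log: the client-side trace above shows the AcidGuaranteesTestTool writer thread getting a RegionTooBusyException, "Over memstore limit=512.0 K", back from HTable.put while the region is busy flushing. As a minimal, hypothetical sketch only — the connection handling is assumed, and the table, family and qualifier names are simply taken from the entries in this log — a caller could treat that exception as retryable and back off before re-submitting the Put, roughly mirroring what the HBase client's own retrying caller does internally.)

```java
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class BusyRegionRetryExample {
  // Retry a single Put with capped exponential backoff when the server reports
  // the region is over its memstore blocking limit (as in the trace above).
  static void putWithBackoff(Connection conn, byte[] row, byte[] value) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(row).addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
      long backoffMs = 100L;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);                      // may surface RegionTooBusyException
          return;
        } catch (RegionTooBusyException busy) {
          if (attempt >= 10) {
            throw busy;                        // give up after a bounded number of attempts
          }
          Thread.sleep(backoffMs);             // let the in-flight flush catch up
          backoffMs = Math.min(backoffMs * 2, 5_000L);
        }
      }
    }
  }
}
```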
2024-12-10T15:38:57,278 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:57,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:57,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:57,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-10T15:38:57,429 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:57,429 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-10T15:38:57,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:57,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:57,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:57,430 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:57,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:57,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:57,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:57,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845197437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:57,445 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:57,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845197445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:57,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:57,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845197445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:57,581 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:57,581 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-10T15:38:57,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:57,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:57,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:57,583 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:57,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:57,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:57,660 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/0372762de69f4d3ea7f6efbc8f0fcd44 2024-12-10T15:38:57,670 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/3182a33732f047938f8d2fc159182788 is 50, key is test_row_0/C:col10/1733845136814/Put/seqid=0 2024-12-10T15:38:57,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742507_1683 (size=12301) 2024-12-10T15:38:57,680 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/3182a33732f047938f8d2fc159182788 2024-12-10T15:38:57,686 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/95fc47fdd28f4913a7afdb2af003aea4 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/95fc47fdd28f4913a7afdb2af003aea4 2024-12-10T15:38:57,688 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/95fc47fdd28f4913a7afdb2af003aea4, entries=150, sequenceid=301, filesize=30.5 K 2024-12-10T15:38:57,689 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/0372762de69f4d3ea7f6efbc8f0fcd44 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/0372762de69f4d3ea7f6efbc8f0fcd44 2024-12-10T15:38:57,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,697 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/0372762de69f4d3ea7f6efbc8f0fcd44, entries=150, sequenceid=301, filesize=12.0 K 2024-12-10T15:38:57,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,697 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/3182a33732f047938f8d2fc159182788 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/3182a33732f047938f8d2fc159182788 2024-12-10T15:38:57,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,699 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,700 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/3182a33732f047938f8d2fc159182788, entries=150, sequenceid=301, filesize=12.0 K 2024-12-10T15:38:57,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,701 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for f84cee47e13bc12ff2ef81f5e007a839 in 886ms, sequenceid=301, compaction requested=true 2024-12-10T15:38:57,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,701 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:57,701 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:38:57,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:57,701 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:57,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:38:57,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:57,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:38:57,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-10T15:38:57,701 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:57,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,702 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94329 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:57,702 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/A is initiating minor compaction (all files) 2024-12-10T15:38:57,702 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/A in TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
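(Editor's aside: the flush above wrote ~140.89 KB at sequenceid=301 and immediately queued minor compactions because each of the A/B/C stores now holds three HFiles. The 512.0 K blocking limit and the three-file compaction trigger are governed by standard HBase settings; the actual configuration of this test run is not visible in this excerpt, so the values below are hypothetical, chosen only to show how they would reproduce the numbers logged here.)

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class FlushAndCompactionKnobs {
  // Hypothetical configuration illustrating the limits recorded in this log:
  // blocking limit = memstore.flush.size * block.multiplier (128 KB * 4 = 512 KB),
  // and a minor compaction is considered once a store reaches three HFiles.
  public static Configuration tinyMemstoreConfig() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);  // 128 KB flushes
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // block puts at 512 KB
    conf.setInt("hbase.hstore.compactionThreshold", 3);              // compact at 3 files
    return conf;
  }
}
```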
2024-12-10T15:38:57,702 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/829f1dafc107466eb0680428b2944d0a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/55d22c14640149848a7a1311b882a3c6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/95fc47fdd28f4913a7afdb2af003aea4] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=92.1 K 2024-12-10T15:38:57,702 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:57,702 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. files: [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/829f1dafc107466eb0680428b2944d0a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/55d22c14640149848a7a1311b882a3c6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/95fc47fdd28f4913a7afdb2af003aea4] 2024-12-10T15:38:57,702 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37467 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:57,702 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/B is initiating minor compaction (all files) 2024-12-10T15:38:57,702 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/B in TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
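(Editor's aside: family A in this run appears to be MOB-enabled — its flushes go through HMobStore/DefaultMobStoreFlusher and its compactions through DefaultMobStoreCompactor, with files renamed under .../mobdir, while B and C use the default flusher. A hypothetical sketch of declaring such a table follows; the 100 KB MOB threshold is an assumption for illustration, not a value read from the log.)

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class MobFamilyExample {
  // Build a TestAcidGuarantees-like descriptor with a MOB-enabled family 'A':
  // values larger than the threshold are written to separate MOB files under
  // the mobdir tree, matching the HMobStore/DefaultMobStoreCompactor entries above.
  public static TableDescriptor mobTable() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)
            .setMobThreshold(100 * 1024)      // hypothetical 100 KB MOB threshold
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
        .build();
  }
}
```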
2024-12-10T15:38:57,703 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 829f1dafc107466eb0680428b2944d0a, keycount=150, bloomtype=ROW, size=31.1 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1733845133926 2024-12-10T15:38:57,703 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/f2ca0a693b6d4e0080b14fcdde38452c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/8ca8dbb97d834dc49b7b2e673329a10b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/0372762de69f4d3ea7f6efbc8f0fcd44] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=36.6 K 2024-12-10T15:38:57,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,703 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 55d22c14640149848a7a1311b882a3c6, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1733845135060 2024-12-10T15:38:57,703 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting f2ca0a693b6d4e0080b14fcdde38452c, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1733845133926 2024-12-10T15:38:57,703 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95fc47fdd28f4913a7afdb2af003aea4, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1733845135701 2024-12-10T15:38:57,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,703 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ca8dbb97d834dc49b7b2e673329a10b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1733845135060 2024-12-10T15:38:57,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,704 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 0372762de69f4d3ea7f6efbc8f0fcd44, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1733845135701 2024-12-10T15:38:57,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,707 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,708 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:57,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,710 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f84cee47e13bc12ff2ef81f5e007a839#B#compaction#579 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:57,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,710 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210488c70077a6b45f9bc2feb1149295c0f_f84cee47e13bc12ff2ef81f5e007a839 store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:57,710 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/67e2769706fd4f0bb1d6e09eeffb947b is 50, key is test_row_0/B:col10/1733845136814/Put/seqid=0 2024-12-10T15:38:57,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,712 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210488c70077a6b45f9bc2feb1149295c0f_f84cee47e13bc12ff2ef81f5e007a839, store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:57,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,712 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210488c70077a6b45f9bc2feb1149295c0f_f84cee47e13bc12ff2ef81f5e007a839 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:57,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-10T15:38:57,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T15:38:57,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742508_1684 (size=13017) 2024-12-10T15:38:57,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742509_1685 (size=4469) 2024-12-10T15:38:57,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,730 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f84cee47e13bc12ff2ef81f5e007a839#A#compaction#578 average throughput is 1.11 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:57,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,731 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/e6d4062f06014dea9fcf882cce368eb6 is 175, key is test_row_0/A:col10/1733845136814/Put/seqid=0 2024-12-10T15:38:57,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,733 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742510_1686 (size=31971) 2024-12-10T15:38:57,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,735 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:57,735 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-10T15:38:57,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:38:57,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,735 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2837): Flushing f84cee47e13bc12ff2ef81f5e007a839 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-10T15:38:57,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=A 2024-12-10T15:38:57,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:57,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=B 2024-12-10T15:38:57,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:57,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=C 2024-12-10T15:38:57,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:57,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412108c7402783c4d444db4b004d4a47e4b88_f84cee47e13bc12ff2ef81f5e007a839 is 50, key is test_row_1/A:col10/1733845136823/Put/seqid=0 
2024-12-10T15:38:57,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T15:38:57,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742511_1687 (size=9914) 2024-12-10T15:38:57,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,747 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,749 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412108c7402783c4d444db4b004d4a47e4b88_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108c7402783c4d444db4b004d4a47e4b88_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:57,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/829a0afe63044f9d8017c28e744ba8c9, store: [table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:57,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/829a0afe63044f9d8017c28e744ba8c9 is 175, key is test_row_1/A:col10/1733845136823/Put/seqid=0 2024-12-10T15:38:57,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,752 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742512_1688 (size=22561) 2024-12-10T15:38:57,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,754 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=314, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/829a0afe63044f9d8017c28e744ba8c9 2024-12-10T15:38:57,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:38:57,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-10T15:38:57,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/1800042451ce441c9d732adf31bfde01 is 50, key is test_row_1/B:col10/1733845136823/Put/seqid=0
2024-12-10T15:38:57,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742513_1689 (size=9857)
2024-12-10T15:38:57,768 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/1800042451ce441c9d732adf31bfde01
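The repeated "instantiating StoreFileTracker impl" lines all come from the org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory logger running at DEBUG, and they swamp the flush-related entries. A minimal sketch, assuming log4j-core 2.x on the classpath as in this run, of raising just that logger to INFO at runtime; the logger name is copied from the entries above, and the same effect could be had with a per-logger level entry in the properties configuration.

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;

    public final class QuietStoreFileTrackerLogging {
        public static void main(String[] args) {
            // Raise only the noisy factory logger from DEBUG to INFO at runtime;
            // the logger name is taken from the log entries above.
            Configurator.setLevel(
                "org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory",
                Level.INFO);
        }
    }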
2024-12-10T15:38:57,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/46d78757d5f94fc8a862118d018b544b is 50, key is test_row_1/C:col10/1733845136823/Put/seqid=0
2024-12-10T15:38:57,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742514_1690 (size=9857)
2024-12-10T15:38:57,805 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/46d78757d5f94fc8a862118d018b544b
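The DefaultStoreFlusher entries record memstore flushes for the B and C families of region f84cee47e13bc12ff2ef81f5e007a839 of TestAcidGuarantees. A minimal, hypothetical sketch of requesting such a flush from a client, assuming a reachable cluster and the standard HBase client API; the table name is taken from the paths in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class FlushTestAcidGuarantees {
        public static void main(String[] args) throws Exception {
            // Assumes hbase-site.xml for this cluster is on the classpath.
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Flush the table's memstores; each column family (A, B, C here) is
                // written to a .tmp HFile and then committed, which is what the
                // DefaultStoreFlusher / HStore "Added" lines record.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }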
2024-12-10T15:38:57,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/829a0afe63044f9d8017c28e744ba8c9 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/829a0afe63044f9d8017c28e744ba8c9
2024-12-10T15:38:57,812 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/829a0afe63044f9d8017c28e744ba8c9, entries=100, sequenceid=314, filesize=22.0 K
2024-12-10T15:38:57,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/1800042451ce441c9d732adf31bfde01 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/1800042451ce441c9d732adf31bfde01
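The "Committing ... as ..." lines record the flushed HFile being moved from the store's .tmp directory into the column-family directory. An illustrative sketch of that move using the Hadoop FileSystem API with the paths copied from the log; this is not HBase's own commit code (which also registers the file with the store file tracker), only the underlying rename it corresponds to.

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class CommitFlushedHFile {
        public static void main(String[] args) throws Exception {
            // NameNode endpoint and region paths are the ones used throughout this run.
            FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:41507"), new Configuration());
            Path region = new Path("/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935"
                + "/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839");
            Path tmpFile = new Path(region, ".tmp/A/829a0afe63044f9d8017c28e744ba8c9");
            Path committed = new Path(region, "A/829a0afe63044f9d8017c28e744ba8c9");
            // The commit amounts to a rename within the same filesystem.
            boolean ok = fs.rename(tmpFile, committed);
            System.out.println("committed=" + ok);
        }
    }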
2024-12-10T15:38:57,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,823 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/1800042451ce441c9d732adf31bfde01, entries=100, sequenceid=314, filesize=9.6 K 2024-12-10T15:38:57,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/46d78757d5f94fc8a862118d018b544b as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/46d78757d5f94fc8a862118d018b544b 2024-12-10T15:38:57,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,827 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/46d78757d5f94fc8a862118d018b544b, entries=100, sequenceid=314, filesize=9.6 K 2024-12-10T15:38:57,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,827 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=0 B/0 for f84cee47e13bc12ff2ef81f5e007a839 in 92ms, sequenceid=314, compaction requested=true 2024-12-10T15:38:57,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2538): Flush status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:57,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:57,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=170 2024-12-10T15:38:57,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=170 2024-12-10T15:38:57,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,829 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-12-10T15:38:57,829 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 702 msec 2024-12-10T15:38:57,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,830 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees in 705 msec 2024-12-10T15:38:57,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[identical DEBUG message from storefiletracker.StoreFileTrackerFactory(122) repeated by RpcServer.default.FPBQ.Fifo handlers 0-2 (port 46239) between 2024-12-10T15:38:57,905 and 2024-12-10T15:38:57,970]
2024-12-10T15:38:57,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:57,993 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f84cee47e13bc12ff2ef81f5e007a839 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-10T15:38:57,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=A 2024-12-10T15:38:57,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline 
suffix; before=1, new segment=null 2024-12-10T15:38:57,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=B 2024-12-10T15:38:57,994 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:57,994 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=C 2024-12-10T15:38:57,994 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:57,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:58,013 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412104b06c8968cdf4a4e9017d79629577a10_f84cee47e13bc12ff2ef81f5e007a839 is 50, key is test_row_0/A:col10/1733845137990/Put/seqid=0 2024-12-10T15:38:58,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742515_1691 (size=12454) 2024-12-10T15:38:58,038 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:58,042 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412104b06c8968cdf4a4e9017d79629577a10_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412104b06c8968cdf4a4e9017d79629577a10_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:58,043 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/a5bc7e6a41f6402b8a16fec7ffc5e27a, store: [table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:58,044 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/a5bc7e6a41f6402b8a16fec7ffc5e27a is 175, key is test_row_0/A:col10/1733845137990/Put/seqid=0 2024-12-10T15:38:58,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742516_1692 (size=31255) 2024-12-10T15:38:58,067 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:58,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845198064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:58,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:58,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845198064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:58,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:58,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845198064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:58,133 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/67e2769706fd4f0bb1d6e09eeffb947b as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/67e2769706fd4f0bb1d6e09eeffb947b 2024-12-10T15:38:58,137 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/B of f84cee47e13bc12ff2ef81f5e007a839 into 67e2769706fd4f0bb1d6e09eeffb947b(size=12.7 K), total size for store is 22.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
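The RegionTooBusyException warnings above are HRegion.checkResources rejecting writes while the region's memstore is over its blocking threshold, which is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier (512 K with this test's settings); the puts succeed again once the MemStoreFlusher drains the region. The Java sketch below is a minimal, application-level backoff around a single put, assuming a table named TestAcidGuarantees with family A and qualifier col10 as in the log; it is illustrative only, since the HBase client also retries this exception internally according to its own retry settings.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);   // rejected while the memstore is over the blocking limit
              break;
            } catch (IOException e) {
              // A RegionTooBusyException (possibly wrapped by the client's retry machinery)
              // means the region is blocked on a flush: back off and try again.
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
        }
      }
    }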
2024-12-10T15:38:58,137 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:58,137 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/B, priority=13, startTime=1733845137701; duration=0sec 2024-12-10T15:38:58,137 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:58,137 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:B 2024-12-10T15:38:58,137 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:38:58,138 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/e6d4062f06014dea9fcf882cce368eb6 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/e6d4062f06014dea9fcf882cce368eb6 2024-12-10T15:38:58,139 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47324 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:38:58,139 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/C is initiating minor compaction (all files) 2024-12-10T15:38:58,139 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/C in TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
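The ExploringCompactionPolicy entries above record the minor-compaction selection for the C store: four eligible files were examined and the four-file window was accepted because its files are "in ratio", meaning no single file is larger than the configured ratio times the combined size of the others in the window. The method below is a simplified, unofficial restatement of that ratio test (the real policy also enumerates candidate windows, honors min/max file counts and size limits, and then prefers the window with the most files); the sizes in the example are rounded from the store files listed in the surrounding entries.

    import java.util.List;

    public final class FilesInRatioSketch {

      // A candidate window passes only if no single file exceeds `ratio` times
      // the combined size of all the other files in the window.
      static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
          if (size > (total - size) * ratio) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Approximate sizes of the four C-family files: 12.6 K, 12.0 K, 12.0 K, 9.6 K.
        List<Long> window = List.of(12_902L, 12_288L, 12_288L, 9_830L);
        System.out.println(filesInRatio(window, 1.2)); // true: the window qualifies
      }
    }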
2024-12-10T15:38:58,139 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/8d501055a40c44eeac71a0065c3ee630, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/a2f4332139414f69877fcc31699fb821, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/3182a33732f047938f8d2fc159182788, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/46d78757d5f94fc8a862118d018b544b] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=46.2 K 2024-12-10T15:38:58,139 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 8d501055a40c44eeac71a0065c3ee630, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1733845133926 2024-12-10T15:38:58,139 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting a2f4332139414f69877fcc31699fb821, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1733845135060 2024-12-10T15:38:58,140 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 3182a33732f047938f8d2fc159182788, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1733845135701 2024-12-10T15:38:58,140 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 46d78757d5f94fc8a862118d018b544b, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1733845136823 2024-12-10T15:38:58,141 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/A of f84cee47e13bc12ff2ef81f5e007a839 into e6d4062f06014dea9fcf882cce368eb6(size=31.2 K), total size for store is 53.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
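The PressureAwareThroughputController entry just below reports this compaction running well under the active cap ("average throughput is 6.55 MB/second ... total limit is 50.00 MB/second"); the controller scales the allowed compaction write rate between a lower and an upper bound depending on memstore pressure. The snippet below sketches how those bounds could be raised; the property names are quoted from memory and should be verified against hbase-default.xml for the HBase version in use, and on a real cluster they belong in the region servers' hbase-site.xml rather than in client code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputBoundsSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed property names: the bounds between which the pressure-aware
        // controller scales the per-server compaction throughput limit.
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 100L * 1024 * 1024);  // 100 MB/s
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 200L * 1024 * 1024); // 200 MB/s
        System.out.println(conf.get("hbase.hstore.compaction.throughput.lower.bound"));
      }
    }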
2024-12-10T15:38:58,141 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:58,141 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/A, priority=13, startTime=1733845137701; duration=0sec 2024-12-10T15:38:58,142 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:58,142 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:A 2024-12-10T15:38:58,146 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f84cee47e13bc12ff2ef81f5e007a839#C#compaction#584 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:58,147 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/507d2bf6bc7a46d3972705551831e833 is 50, key is test_row_0/C:col10/1733845136814/Put/seqid=0 2024-12-10T15:38:58,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742517_1693 (size=13051) 2024-12-10T15:38:58,170 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:58,170 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:58,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845198169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:58,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845198169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:58,171 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:58,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845198169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:58,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-10T15:38:58,228 INFO [Thread-2652 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-12-10T15:38:58,229 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:38:58,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees 2024-12-10T15:38:58,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-10T15:38:58,230 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:38:58,231 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:38:58,231 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:38:58,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-10T15:38:58,372 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:58,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845198371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:58,374 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:58,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845198372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:58,374 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:58,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845198372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:58,381 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:58,381 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-10T15:38:58,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:58,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:58,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:58,381 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:58,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:58,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:58,461 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=325, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/a5bc7e6a41f6402b8a16fec7ffc5e27a 2024-12-10T15:38:58,465 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/17ce487f888e467d88724699d3c5e5f2 is 50, key is test_row_0/B:col10/1733845137990/Put/seqid=0 2024-12-10T15:38:58,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742518_1694 (size=12301) 2024-12-10T15:38:58,468 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/17ce487f888e467d88724699d3c5e5f2 2024-12-10T15:38:58,473 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/c78a8f7e97db4dc490fd29ad7fbfa7d0 is 50, key is test_row_0/C:col10/1733845137990/Put/seqid=0 2024-12-10T15:38:58,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742519_1695 (size=12301) 2024-12-10T15:38:58,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done 
pid=171 2024-12-10T15:38:58,533 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:58,533 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-10T15:38:58,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:58,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:58,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:58,533 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:58,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:38:58,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:58,552 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/507d2bf6bc7a46d3972705551831e833 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/507d2bf6bc7a46d3972705551831e833 2024-12-10T15:38:58,555 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/C of f84cee47e13bc12ff2ef81f5e007a839 into 507d2bf6bc7a46d3972705551831e833(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:38:58,555 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:58,555 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/C, priority=12, startTime=1733845137701; duration=0sec 2024-12-10T15:38:58,555 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:58,555 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:C 2024-12-10T15:38:58,674 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:58,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845198673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:58,676 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:58,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845198676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:58,683 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:58,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845198682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:58,684 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:58,685 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-10T15:38:58,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:58,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:58,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:58,685 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:38:58,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:58,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:58,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-10T15:38:58,840 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:58,840 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-10T15:38:58,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:58,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:58,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:58,842 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:58,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:58,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:38:58,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:58,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39556 deadline: 1733845198853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:58,854 DEBUG [Thread-2642 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8208 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., hostname=bf0fec90ff6d,46239,1733844953049, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-10T15:38:58,879 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/c78a8f7e97db4dc490fd29ad7fbfa7d0 2024-12-10T15:38:58,883 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/a5bc7e6a41f6402b8a16fec7ffc5e27a as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/a5bc7e6a41f6402b8a16fec7ffc5e27a 2024-12-10T15:38:58,886 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/a5bc7e6a41f6402b8a16fec7ffc5e27a, entries=150, sequenceid=325, filesize=30.5 K 2024-12-10T15:38:58,889 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/17ce487f888e467d88724699d3c5e5f2 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/17ce487f888e467d88724699d3c5e5f2 2024-12-10T15:38:58,892 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/17ce487f888e467d88724699d3c5e5f2, entries=150, sequenceid=325, filesize=12.0 K 2024-12-10T15:38:58,893 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/c78a8f7e97db4dc490fd29ad7fbfa7d0 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/c78a8f7e97db4dc490fd29ad7fbfa7d0 2024-12-10T15:38:58,895 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/c78a8f7e97db4dc490fd29ad7fbfa7d0, entries=150, sequenceid=325, filesize=12.0 K 2024-12-10T15:38:58,896 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for f84cee47e13bc12ff2ef81f5e007a839 in 903ms, sequenceid=325, compaction requested=true 2024-12-10T15:38:58,896 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:58,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:A, priority=-2147483648, current under compaction store size 
is 1 2024-12-10T15:38:58,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:58,896 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:58,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:38:58,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:58,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:38:58,896 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:38:58,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:58,903 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35175 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:58,903 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/B is initiating minor compaction (all files) 2024-12-10T15:38:58,903 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/B in TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:38:58,903 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/67e2769706fd4f0bb1d6e09eeffb947b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/1800042451ce441c9d732adf31bfde01, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/17ce487f888e467d88724699d3c5e5f2] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=34.4 K 2024-12-10T15:38:58,903 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85787 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:38:58,903 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/A is initiating minor compaction (all files) 2024-12-10T15:38:58,903 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/A in TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:58,903 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/e6d4062f06014dea9fcf882cce368eb6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/829a0afe63044f9d8017c28e744ba8c9, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/a5bc7e6a41f6402b8a16fec7ffc5e27a] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=83.8 K 2024-12-10T15:38:58,903 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:38:58,903 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
files: [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/e6d4062f06014dea9fcf882cce368eb6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/829a0afe63044f9d8017c28e744ba8c9, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/a5bc7e6a41f6402b8a16fec7ffc5e27a] 2024-12-10T15:38:58,904 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 67e2769706fd4f0bb1d6e09eeffb947b, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1733845135701 2024-12-10T15:38:58,904 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 1800042451ce441c9d732adf31bfde01, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1733845136823 2024-12-10T15:38:58,904 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting e6d4062f06014dea9fcf882cce368eb6, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1733845135701 2024-12-10T15:38:58,905 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 17ce487f888e467d88724699d3c5e5f2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1733845137980 2024-12-10T15:38:58,905 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 829a0afe63044f9d8017c28e744ba8c9, keycount=100, bloomtype=ROW, size=22.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1733845136823 2024-12-10T15:38:58,906 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting a5bc7e6a41f6402b8a16fec7ffc5e27a, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1733845137980 2024-12-10T15:38:58,913 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f84cee47e13bc12ff2ef81f5e007a839#B#compaction#587 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:58,913 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/ba3cf8793641468f8b2272359573f17e is 50, key is test_row_0/B:col10/1733845137990/Put/seqid=0 2024-12-10T15:38:58,916 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:58,920 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412104f6f33b1e7264db18940651c643d92ea_f84cee47e13bc12ff2ef81f5e007a839 store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:58,921 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412104f6f33b1e7264db18940651c643d92ea_f84cee47e13bc12ff2ef81f5e007a839, store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:58,922 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412104f6f33b1e7264db18940651c643d92ea_f84cee47e13bc12ff2ef81f5e007a839 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:58,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742520_1696 (size=13119) 2024-12-10T15:38:58,943 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/ba3cf8793641468f8b2272359573f17e as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/ba3cf8793641468f8b2272359573f17e 2024-12-10T15:38:58,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742521_1697 (size=4469) 2024-12-10T15:38:58,947 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/B of f84cee47e13bc12ff2ef81f5e007a839 into ba3cf8793641468f8b2272359573f17e(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:38:58,947 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:58,947 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/B, priority=13, startTime=1733845138896; duration=0sec 2024-12-10T15:38:58,947 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:38:58,947 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:B 2024-12-10T15:38:58,947 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-10T15:38:58,948 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-10T15:38:58,948 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-10T15:38:58,949 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. because compaction request was cancelled 2024-12-10T15:38:58,949 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:C 2024-12-10T15:38:58,949 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f84cee47e13bc12ff2ef81f5e007a839#A#compaction#588 average throughput is 0.74 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:38:58,949 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/3ecb3df8c428473dbb1c9cadb4ff3c4e is 175, key is test_row_0/A:col10/1733845137990/Put/seqid=0 2024-12-10T15:38:58,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742522_1698 (size=32073) 2024-12-10T15:38:58,972 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/3ecb3df8c428473dbb1c9cadb4ff3c4e as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/3ecb3df8c428473dbb1c9cadb4ff3c4e 2024-12-10T15:38:58,975 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/A of f84cee47e13bc12ff2ef81f5e007a839 into 3ecb3df8c428473dbb1c9cadb4ff3c4e(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:38:58,975 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:58,975 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/A, priority=13, startTime=1733845138896; duration=0sec 2024-12-10T15:38:58,975 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:38:58,975 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:A 2024-12-10T15:38:58,994 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:58,995 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-10T15:38:58,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:38:58,995 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing f84cee47e13bc12ff2ef81f5e007a839 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-10T15:38:58,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=A 2024-12-10T15:38:58,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:58,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=B 2024-12-10T15:38:58,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:58,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=C 2024-12-10T15:38:58,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:38:59,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412109c720b677fa7456b9d688fdfe0a8b476_f84cee47e13bc12ff2ef81f5e007a839 is 50, key is test_row_0/A:col10/1733845138038/Put/seqid=0 2024-12-10T15:38:59,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742523_1699 (size=12454) 2024-12-10T15:38:59,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:38:59,024 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412109c720b677fa7456b9d688fdfe0a8b476_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412109c720b677fa7456b9d688fdfe0a8b476_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:59,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/922a9783e8ab4f44b96fbeda639d578a, store: [table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:38:59,025 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/922a9783e8ab4f44b96fbeda639d578a is 175, key is test_row_0/A:col10/1733845138038/Put/seqid=0 2024-12-10T15:38:59,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742524_1700 (size=31255) 2024-12-10T15:38:59,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:38:59,182 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:38:59,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:59,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845199201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:59,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:59,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845199213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:59,216 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:59,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845199214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:59,314 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:59,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845199313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:59,319 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:59,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845199318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:59,319 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:59,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845199318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:59,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-10T15:38:59,453 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=355, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/922a9783e8ab4f44b96fbeda639d578a 2024-12-10T15:38:59,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/57816faaaa564c6ba1435076e6f091b6 is 50, key is test_row_0/B:col10/1733845138038/Put/seqid=0 2024-12-10T15:38:59,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742525_1701 (size=12301) 2024-12-10T15:38:59,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:59,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:59,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845199519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:59,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845199521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:59,528 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:59,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845199527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:59,831 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:59,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845199829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:59,831 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:59,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845199829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:59,832 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:38:59,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845199830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:38:59,884 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/57816faaaa564c6ba1435076e6f091b6 2024-12-10T15:38:59,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/e04f1a57a85e428aab5b739637146bf2 is 50, key is test_row_0/C:col10/1733845138038/Put/seqid=0 2024-12-10T15:38:59,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742526_1702 (size=12301) 2024-12-10T15:38:59,912 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/e04f1a57a85e428aab5b739637146bf2 2024-12-10T15:38:59,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/922a9783e8ab4f44b96fbeda639d578a as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/922a9783e8ab4f44b96fbeda639d578a 2024-12-10T15:38:59,919 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/922a9783e8ab4f44b96fbeda639d578a, entries=150, sequenceid=355, filesize=30.5 K 2024-12-10T15:38:59,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/57816faaaa564c6ba1435076e6f091b6 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/57816faaaa564c6ba1435076e6f091b6 2024-12-10T15:38:59,923 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/57816faaaa564c6ba1435076e6f091b6, entries=150, sequenceid=355, filesize=12.0 K 2024-12-10T15:38:59,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/e04f1a57a85e428aab5b739637146bf2 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/e04f1a57a85e428aab5b739637146bf2 2024-12-10T15:38:59,929 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/e04f1a57a85e428aab5b739637146bf2, entries=150, sequenceid=355, filesize=12.0 K 2024-12-10T15:38:59,930 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=67.09 KB/68700 for f84cee47e13bc12ff2ef81f5e007a839 in 935ms, sequenceid=355, compaction requested=true 2024-12-10T15:38:59,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:38:59,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:38:59,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-12-10T15:38:59,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-12-10T15:38:59,932 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-12-10T15:38:59,932 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7000 sec 2024-12-10T15:38:59,933 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees in 1.7030 sec 2024-12-10T15:39:00,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:00,334 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f84cee47e13bc12ff2ef81f5e007a839 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-10T15:39:00,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=A 2024-12-10T15:39:00,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:39:00,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=B 2024-12-10T15:39:00,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:39:00,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=C 2024-12-10T15:39:00,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:39:00,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-10T15:39:00,338 INFO [Thread-2652 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-12-10T15:39:00,339 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:39:00,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees 2024-12-10T15:39:00,340 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412101ba8e2acd1944368a85e6a5b9504c2a8_f84cee47e13bc12ff2ef81f5e007a839 is 50, key is test_row_0/A:col10/1733845139193/Put/seqid=0 2024-12-10T15:39:00,340 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute 
state=FLUSH_TABLE_PREPARE 2024-12-10T15:39:00,340 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:39:00,341 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:39:00,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-10T15:39:00,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742527_1703 (size=14994) 2024-12-10T15:39:00,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:39:00,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845200356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:00,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:39:00,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845200358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:00,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:39:00,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845200358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:00,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-10T15:39:00,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:39:00,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845200458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:00,463 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:39:00,463 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:39:00,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845200460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:00,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845200461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:00,492 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:00,492 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-10T15:39:00,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:39:00,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:39:00,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:39:00,492 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:39:00,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:39:00,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:39:00,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-10T15:39:00,645 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:00,645 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-10T15:39:00,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:39:00,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:39:00,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:39:00,647 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:39:00,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:39:00,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:39:00,664 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:39:00,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845200663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:00,666 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:39:00,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845200665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:00,667 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:39:00,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845200665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:00,748 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:00,751 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412101ba8e2acd1944368a85e6a5b9504c2a8_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412101ba8e2acd1944368a85e6a5b9504c2a8_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:00,751 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/db7d7d2d47c34bef80090f59f4d99745, store: [table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:39:00,752 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/db7d7d2d47c34bef80090f59f4d99745 is 175, key is test_row_0/A:col10/1733845139193/Put/seqid=0 2024-12-10T15:39:00,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742528_1704 (size=39949) 2024-12-10T15:39:00,762 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=369, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/db7d7d2d47c34bef80090f59f4d99745 2024-12-10T15:39:00,773 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/02082fd1eb74411c8090eef2f9227c81 is 50, key is test_row_0/B:col10/1733845139193/Put/seqid=0 2024-12-10T15:39:00,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742529_1705 (size=12301) 2024-12-10T15:39:00,793 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=369 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/02082fd1eb74411c8090eef2f9227c81 2024-12-10T15:39:00,804 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:00,804 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-10T15:39:00,805 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/4d2b7236b6c641cdb0a7e924506227bc is 50, key is test_row_0/C:col10/1733845139193/Put/seqid=0 2024-12-10T15:39:00,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:39:00,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:39:00,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:39:00,807 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:39:00,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:39:00,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:39:00,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742530_1706 (size=12301) 2024-12-10T15:39:00,822 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=369 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/4d2b7236b6c641cdb0a7e924506227bc 2024-12-10T15:39:00,825 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/db7d7d2d47c34bef80090f59f4d99745 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/db7d7d2d47c34bef80090f59f4d99745 2024-12-10T15:39:00,829 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/db7d7d2d47c34bef80090f59f4d99745, entries=200, sequenceid=369, filesize=39.0 K 2024-12-10T15:39:00,830 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/02082fd1eb74411c8090eef2f9227c81 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/02082fd1eb74411c8090eef2f9227c81 2024-12-10T15:39:00,834 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/02082fd1eb74411c8090eef2f9227c81, entries=150, sequenceid=369, filesize=12.0 K 2024-12-10T15:39:00,835 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/4d2b7236b6c641cdb0a7e924506227bc as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/4d2b7236b6c641cdb0a7e924506227bc 2024-12-10T15:39:00,839 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/4d2b7236b6c641cdb0a7e924506227bc, entries=150, sequenceid=369, filesize=12.0 K 2024-12-10T15:39:00,844 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for f84cee47e13bc12ff2ef81f5e007a839 in 510ms, sequenceid=369, compaction requested=true 2024-12-10T15:39:00,844 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:39:00,844 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:39:00,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:39:00,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:39:00,844 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:39:00,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:39:00,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:39:00,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:39:00,844 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:39:00,845 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103277 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:39:00,845 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/A is initiating minor compaction (all files) 2024-12-10T15:39:00,845 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/A in TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:39:00,845 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/3ecb3df8c428473dbb1c9cadb4ff3c4e, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/922a9783e8ab4f44b96fbeda639d578a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/db7d7d2d47c34bef80090f59f4d99745] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=100.9 K 2024-12-10T15:39:00,845 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:39:00,845 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. files: [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/3ecb3df8c428473dbb1c9cadb4ff3c4e, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/922a9783e8ab4f44b96fbeda639d578a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/db7d7d2d47c34bef80090f59f4d99745] 2024-12-10T15:39:00,846 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:39:00,846 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/B is initiating minor compaction (all files) 2024-12-10T15:39:00,846 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/B in TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:39:00,846 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/ba3cf8793641468f8b2272359573f17e, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/57816faaaa564c6ba1435076e6f091b6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/02082fd1eb74411c8090eef2f9227c81] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=36.8 K 2024-12-10T15:39:00,846 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3ecb3df8c428473dbb1c9cadb4ff3c4e, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1733845137980 2024-12-10T15:39:00,846 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting ba3cf8793641468f8b2272359573f17e, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1733845137980 2024-12-10T15:39:00,847 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 922a9783e8ab4f44b96fbeda639d578a, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1733845138038 2024-12-10T15:39:00,847 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 57816faaaa564c6ba1435076e6f091b6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1733845138038 2024-12-10T15:39:00,847 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting db7d7d2d47c34bef80090f59f4d99745, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1733845139189 2024-12-10T15:39:00,847 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 02082fd1eb74411c8090eef2f9227c81, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1733845139192 2024-12-10T15:39:00,856 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f84cee47e13bc12ff2ef81f5e007a839#B#compaction#595 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:39:00,856 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/e2a4569c3ddd4f738b55afa96c0380d3 is 50, key is test_row_0/B:col10/1733845139193/Put/seqid=0 2024-12-10T15:39:00,859 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:39:00,868 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412100f22b4f3affb4d97a99d08df2eb84bdd_f84cee47e13bc12ff2ef81f5e007a839 store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:39:00,869 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412100f22b4f3affb4d97a99d08df2eb84bdd_f84cee47e13bc12ff2ef81f5e007a839, store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:39:00,869 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412100f22b4f3affb4d97a99d08df2eb84bdd_f84cee47e13bc12ff2ef81f5e007a839 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:39:00,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742531_1707 (size=13221) 2024-12-10T15:39:00,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742532_1708 (size=4469) 2024-12-10T15:39:00,921 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f84cee47e13bc12ff2ef81f5e007a839#A#compaction#596 average throughput is 0.40 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:39:00,922 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/5fa93247215e4133bdf7f0b3a1591cf2 is 175, key is test_row_0/A:col10/1733845139193/Put/seqid=0 2024-12-10T15:39:00,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742533_1709 (size=32175) 2024-12-10T15:39:00,928 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/5fa93247215e4133bdf7f0b3a1591cf2 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/5fa93247215e4133bdf7f0b3a1591cf2 2024-12-10T15:39:00,931 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/A of f84cee47e13bc12ff2ef81f5e007a839 into 5fa93247215e4133bdf7f0b3a1591cf2(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:39:00,931 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:39:00,931 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/A, priority=13, startTime=1733845140844; duration=0sec 2024-12-10T15:39:00,931 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:39:00,931 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:A 2024-12-10T15:39:00,931 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-10T15:39:00,933 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49954 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-10T15:39:00,933 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/C is initiating minor compaction (all files) 2024-12-10T15:39:00,933 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/C in TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:39:00,933 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/507d2bf6bc7a46d3972705551831e833, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/c78a8f7e97db4dc490fd29ad7fbfa7d0, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/e04f1a57a85e428aab5b739637146bf2, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/4d2b7236b6c641cdb0a7e924506227bc] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=48.8 K 2024-12-10T15:39:00,933 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 507d2bf6bc7a46d3972705551831e833, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1733845136814 2024-12-10T15:39:00,933 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting c78a8f7e97db4dc490fd29ad7fbfa7d0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1733845137980 2024-12-10T15:39:00,933 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting e04f1a57a85e428aab5b739637146bf2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1733845138038 2024-12-10T15:39:00,934 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4d2b7236b6c641cdb0a7e924506227bc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1733845139192 2024-12-10T15:39:00,939 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f84cee47e13bc12ff2ef81f5e007a839#C#compaction#597 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:39:00,940 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/3c60dd38c6d54023ab5393c8a868d0ab is 50, key is test_row_0/C:col10/1733845139193/Put/seqid=0 2024-12-10T15:39:00,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-10T15:39:00,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742534_1710 (size=13187) 2024-12-10T15:39:00,959 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:00,959 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-10T15:39:00,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:39:00,960 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2837): Flushing f84cee47e13bc12ff2ef81f5e007a839 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-10T15:39:00,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=A 2024-12-10T15:39:00,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:39:00,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=B 2024-12-10T15:39:00,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:39:00,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=C 2024-12-10T15:39:00,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:39:00,963 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/3c60dd38c6d54023ab5393c8a868d0ab as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/3c60dd38c6d54023ab5393c8a868d0ab 2024-12-10T15:39:00,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
regionserver.HRegion(8581): Flush requested on f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:00,968 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:39:00,968 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/C of f84cee47e13bc12ff2ef81f5e007a839 into 3c60dd38c6d54023ab5393c8a868d0ab(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:39:00,968 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:39:00,968 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/C, priority=12, startTime=1733845140844; duration=0sec 2024-12-10T15:39:00,968 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:39:00,968 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:C 2024-12-10T15:39:00,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210744bd2868b1049829916303cb925e665_f84cee47e13bc12ff2ef81f5e007a839 is 50, key is test_row_0/A:col10/1733845140351/Put/seqid=0 2024-12-10T15:39:00,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742535_1711 (size=12454) 2024-12-10T15:39:00,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:00,977 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210744bd2868b1049829916303cb925e665_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210744bd2868b1049829916303cb925e665_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:00,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/6b85e6e1beed4e8c974d54fc632491b2, store: [table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:39:00,978 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/6b85e6e1beed4e8c974d54fc632491b2 is 175, key is test_row_0/A:col10/1733845140351/Put/seqid=0 2024-12-10T15:39:00,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:39:00,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845200977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:00,981 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:39:00,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845200978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:00,981 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:39:00,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845200980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:01,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742536_1712 (size=31255) 2024-12-10T15:39:01,005 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=392, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/6b85e6e1beed4e8c974d54fc632491b2 2024-12-10T15:39:01,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/2530d30aed3b4f52ae7dfcd818554c7d is 50, key is test_row_0/B:col10/1733845140351/Put/seqid=0 2024-12-10T15:39:01,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742537_1713 (size=12301) 2024-12-10T15:39:01,027 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/2530d30aed3b4f52ae7dfcd818554c7d 2024-12-10T15:39:01,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/663cf957c9c7450fa6716eb0fac0ffce is 50, key is test_row_0/C:col10/1733845140351/Put/seqid=0 2024-12-10T15:39:01,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742538_1714 (size=12301) 2024-12-10T15:39:01,045 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=392 (bloomFilter=true), 
to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/663cf957c9c7450fa6716eb0fac0ffce 2024-12-10T15:39:01,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/6b85e6e1beed4e8c974d54fc632491b2 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/6b85e6e1beed4e8c974d54fc632491b2 2024-12-10T15:39:01,054 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/6b85e6e1beed4e8c974d54fc632491b2, entries=150, sequenceid=392, filesize=30.5 K 2024-12-10T15:39:01,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/2530d30aed3b4f52ae7dfcd818554c7d as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/2530d30aed3b4f52ae7dfcd818554c7d 2024-12-10T15:39:01,057 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/2530d30aed3b4f52ae7dfcd818554c7d, entries=150, sequenceid=392, filesize=12.0 K 2024-12-10T15:39:01,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/663cf957c9c7450fa6716eb0fac0ffce as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/663cf957c9c7450fa6716eb0fac0ffce 2024-12-10T15:39:01,061 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/663cf957c9c7450fa6716eb0fac0ffce, entries=150, sequenceid=392, filesize=12.0 K 2024-12-10T15:39:01,061 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for f84cee47e13bc12ff2ef81f5e007a839 in 101ms, sequenceid=392, compaction requested=false 2024-12-10T15:39:01,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2538): Flush status journal for 
f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:39:01,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:39:01,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=174 2024-12-10T15:39:01,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=174 2024-12-10T15:39:01,063 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-12-10T15:39:01,063 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 722 msec 2024-12-10T15:39:01,064 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees in 724 msec 2024-12-10T15:39:01,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:01,083 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f84cee47e13bc12ff2ef81f5e007a839 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-10T15:39:01,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=A 2024-12-10T15:39:01,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:39:01,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=B 2024-12-10T15:39:01,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:39:01,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=C 2024-12-10T15:39:01,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:39:01,089 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210bb21ac861013473f84d275f3fccc973c_f84cee47e13bc12ff2ef81f5e007a839 is 50, key is test_row_0/A:col10/1733845141083/Put/seqid=0 2024-12-10T15:39:01,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742539_1715 (size=14994) 2024-12-10T15:39:01,092 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:01,095 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210bb21ac861013473f84d275f3fccc973c_f84cee47e13bc12ff2ef81f5e007a839 to 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210bb21ac861013473f84d275f3fccc973c_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:01,096 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/0aa379dfe7ac4f6eb72de92b7cd495b2, store: [table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:39:01,096 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/0aa379dfe7ac4f6eb72de92b7cd495b2 is 175, key is test_row_0/A:col10/1733845141083/Put/seqid=0 2024-12-10T15:39:01,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742540_1716 (size=39949) 2024-12-10T15:39:01,103 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:39:01,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845201100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:01,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:39:01,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845201102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:01,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:39:01,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845201103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:01,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:39:01,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845201204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:01,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:39:01,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845201205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:01,207 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:39:01,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845201205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:01,301 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/e2a4569c3ddd4f738b55afa96c0380d3 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/e2a4569c3ddd4f738b55afa96c0380d3 2024-12-10T15:39:01,305 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/B of f84cee47e13bc12ff2ef81f5e007a839 into e2a4569c3ddd4f738b55afa96c0380d3(size=12.9 K), total size for store is 24.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:39:01,305 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:39:01,305 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/B, priority=13, startTime=1733845140844; duration=0sec 2024-12-10T15:39:01,305 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:39:01,305 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:B 2024-12-10T15:39:01,408 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:39:01,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845201407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:01,408 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:39:01,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845201407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:01,411 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:39:01,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845201408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:01,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-10T15:39:01,444 INFO [Thread-2652 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-12-10T15:39:01,445 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-10T15:39:01,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees 2024-12-10T15:39:01,447 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-10T15:39:01,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-10T15:39:01,447 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-10T15:39:01,447 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-10T15:39:01,502 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=410, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/0aa379dfe7ac4f6eb72de92b7cd495b2 2024-12-10T15:39:01,506 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/606c12ebe50749e0833e56ba5ed24a55 is 50, key is test_row_0/B:col10/1733845141083/Put/seqid=0 2024-12-10T15:39:01,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742541_1717 (size=12301) 2024-12-10T15:39:01,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-10T15:39:01,599 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:01,599 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-10T15:39:01,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:39:01,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:39:01,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:39:01,599 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:39:01,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:39:01,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:39:01,712 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:39:01,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845201710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:01,713 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:39:01,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845201711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:01,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:39:01,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845201712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:01,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-10T15:39:01,751 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:01,751 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-10T15:39:01,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:39:01,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:39:01,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:39:01,752 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:39:01,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:39:01,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:39:01,903 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:01,903 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-10T15:39:01,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:39:01,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:39:01,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:39:01,904 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:39:01,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:39:01,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:39:01,910 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=410 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/606c12ebe50749e0833e56ba5ed24a55 2024-12-10T15:39:01,914 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/db91c748317e4555bc906a004d0ff3f7 is 50, key is test_row_0/C:col10/1733845141083/Put/seqid=0 2024-12-10T15:39:01,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742542_1718 (size=12301) 2024-12-10T15:39:02,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-10T15:39:02,055 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:02,055 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-10T15:39:02,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:39:02,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:39:02,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:39:02,056 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-10T15:39:02,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:39:02,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:39:02,207 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:02,208 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-10T15:39:02,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:39:02,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. as already flushing 2024-12-10T15:39:02,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:39:02,208 ERROR [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:39:02,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-10T15:39:02,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:39:02,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845202214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:02,217 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:39:02,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845202215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:02,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:39:02,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845202218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:02,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] 
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
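The RegionTooBusyException entries above are HBase's back-pressure signal: the region's memstore has grown past its blocking limit (512 K here), so writes are rejected until a flush catches up, and clients are expected to back off and retry. The sketch below is a minimal illustration of that pattern, assuming a reachable cluster plus the TestAcidGuarantees table and column family A seen in this log; it is not part of the test code, and the stock client may already absorb some of these exceptions through its own retry settings.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put); // may surface RegionTooBusyException once client retries are exhausted
          break;
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs); // back off so the pending flush can drain the memstore
          backoffMs *= 2;          // simple exponential backoff
        }
      }
    }
  }
}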
2024-12-10T15:39:02,317 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=410 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/db91c748317e4555bc906a004d0ff3f7 2024-12-10T15:39:02,320 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/0aa379dfe7ac4f6eb72de92b7cd495b2 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/0aa379dfe7ac4f6eb72de92b7cd495b2 2024-12-10T15:39:02,323 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/0aa379dfe7ac4f6eb72de92b7cd495b2, entries=200, sequenceid=410, filesize=39.0 K 2024-12-10T15:39:02,324 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/606c12ebe50749e0833e56ba5ed24a55 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/606c12ebe50749e0833e56ba5ed24a55 2024-12-10T15:39:02,326 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/606c12ebe50749e0833e56ba5ed24a55, entries=150, sequenceid=410, filesize=12.0 K 2024-12-10T15:39:02,327 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/db91c748317e4555bc906a004d0ff3f7 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/db91c748317e4555bc906a004d0ff3f7 2024-12-10T15:39:02,331 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/db91c748317e4555bc906a004d0ff3f7, entries=150, sequenceid=410, filesize=12.0 K 2024-12-10T15:39:02,331 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for f84cee47e13bc12ff2ef81f5e007a839 in 1248ms, sequenceid=410, compaction requested=true 2024-12-10T15:39:02,331 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:39:02,332 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:39:02,332 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:A, priority=-2147483648, 
current under compaction store size is 1 2024-12-10T15:39:02,332 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:39:02,332 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:39:02,333 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:39:02,333 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/B is initiating minor compaction (all files) 2024-12-10T15:39:02,333 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/B in TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:39:02,333 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/e2a4569c3ddd4f738b55afa96c0380d3, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/2530d30aed3b4f52ae7dfcd818554c7d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/606c12ebe50749e0833e56ba5ed24a55] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=36.9 K 2024-12-10T15:39:02,333 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103379 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:39:02,333 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/A is initiating minor compaction (all files) 2024-12-10T15:39:02,333 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/A in TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
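The ExploringCompactionPolicy lines above report that all three eligible store files were "in ratio" and were therefore selected together. The core idea is a size-ratio test: a candidate set is acceptable when no single file dwarfs the rest (the ratio is typically 1.2 by default). The following is a simplified sketch of that test, not the HBase source, with approximate file sizes taken from the B store above.

public class RatioCheckExample {
  // Simplified illustration of the "files in ratio" test behind exploring-style
  // compaction selection: no file may be larger than ratio * (sum of the other files).
  static boolean filesInRatio(long[] fileSizes, double ratio) {
    long total = 0;
    for (long size : fileSizes) {
      total += size;
    }
    for (long size : fileSizes) {
      if (size > ratio * (total - size)) {
        return false; // one file dominates the candidate set, so reject it
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Roughly the B-store sizes above (~12.9 K, 12 K, 12 K): with a ratio of 1.2
    // no file exceeds 1.2x the sum of the others, so all three compact together.
    System.out.println(filesInRatio(new long[] {13_200, 12_300, 12_300}, 1.2));
  }
}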
2024-12-10T15:39:02,333 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/5fa93247215e4133bdf7f0b3a1591cf2, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/6b85e6e1beed4e8c974d54fc632491b2, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/0aa379dfe7ac4f6eb72de92b7cd495b2] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=101.0 K 2024-12-10T15:39:02,333 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:39:02,333 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. files: [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/5fa93247215e4133bdf7f0b3a1591cf2, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/6b85e6e1beed4e8c974d54fc632491b2, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/0aa379dfe7ac4f6eb72de92b7cd495b2] 2024-12-10T15:39:02,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:39:02,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:39:02,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:39:02,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:39:02,333 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting e2a4569c3ddd4f738b55afa96c0380d3, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1733845139192 2024-12-10T15:39:02,334 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5fa93247215e4133bdf7f0b3a1591cf2, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1733845139192 2024-12-10T15:39:02,334 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 
2530d30aed3b4f52ae7dfcd818554c7d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1733845140351 2024-12-10T15:39:02,334 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b85e6e1beed4e8c974d54fc632491b2, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1733845140351 2024-12-10T15:39:02,334 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 606c12ebe50749e0833e56ba5ed24a55, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1733845140976 2024-12-10T15:39:02,334 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0aa379dfe7ac4f6eb72de92b7cd495b2, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1733845140976 2024-12-10T15:39:02,341 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:39:02,341 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f84cee47e13bc12ff2ef81f5e007a839#B#compaction#604 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:39:02,342 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/ee23cdd9422947ba9b6304eaa729de5e is 50, key is test_row_0/B:col10/1733845141083/Put/seqid=0 2024-12-10T15:39:02,345 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412106aeca1e86dce43159c650b8cbc255f76_f84cee47e13bc12ff2ef81f5e007a839 store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:39:02,346 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412106aeca1e86dce43159c650b8cbc255f76_f84cee47e13bc12ff2ef81f5e007a839, store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:39:02,346 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412106aeca1e86dce43159c650b8cbc255f76_f84cee47e13bc12ff2ef81f5e007a839 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:39:02,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742544_1720 (size=4469) 2024-12-10T15:39:02,359 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f84cee47e13bc12ff2ef81f5e007a839#A#compaction#605 average throughput is 1.36 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:39:02,360 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/32bf349d84044c6a82e8bfe665816841 is 175, key is test_row_0/A:col10/1733845141083/Put/seqid=0 2024-12-10T15:39:02,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742545_1721 (size=32277) 2024-12-10T15:39:02,370 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:02,371 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46239 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-10T15:39:02,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:39:02,371 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2837): Flushing f84cee47e13bc12ff2ef81f5e007a839 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-10T15:39:02,371 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/32bf349d84044c6a82e8bfe665816841 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/32bf349d84044c6a82e8bfe665816841 2024-12-10T15:39:02,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=A 2024-12-10T15:39:02,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:39:02,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=B 2024-12-10T15:39:02,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:39:02,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=C 2024-12-10T15:39:02,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:39:02,375 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/A of f84cee47e13bc12ff2ef81f5e007a839 into 
32bf349d84044c6a82e8bfe665816841(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:39:02,375 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:39:02,375 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/A, priority=13, startTime=1733845142332; duration=0sec 2024-12-10T15:39:02,375 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:39:02,375 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:A 2024-12-10T15:39:02,375 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:39:02,376 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:39:02,376 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/C is initiating minor compaction (all files) 2024-12-10T15:39:02,376 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/C in TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
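The DefaultMobStoreCompactor lines for store A show a MOB writer being created and then aborted because none of the compacted cells crossed the MOB size threshold, so all data stayed in the ordinary store file. MOB storage is enabled per column family; the fragment below sketches how such a family might be declared, using an assumed 100 KB threshold purely for illustration (the actual settings used by this test are not shown in the log).

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
  // Column family "A" with MOB storage for cells larger than ~100 KB (illustrative threshold).
  public static ColumnFamilyDescriptor mobFamilyA() {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
        .setMobEnabled(true)           // oversized cells go to separate MOB files
        .setMobThreshold(100 * 1024L)  // smaller cells stay in ordinary store files
        .build();
  }
}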
2024-12-10T15:39:02,376 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/3c60dd38c6d54023ab5393c8a868d0ab, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/663cf957c9c7450fa6716eb0fac0ffce, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/db91c748317e4555bc906a004d0ff3f7] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=36.9 K 2024-12-10T15:39:02,376 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c60dd38c6d54023ab5393c8a868d0ab, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1733845139192 2024-12-10T15:39:02,376 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 663cf957c9c7450fa6716eb0fac0ffce, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1733845140351 2024-12-10T15:39:02,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742543_1719 (size=13323) 2024-12-10T15:39:02,377 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting db91c748317e4555bc906a004d0ff3f7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1733845140976 2024-12-10T15:39:02,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121005387282ea9541acadf906a9ad372f44_f84cee47e13bc12ff2ef81f5e007a839 is 50, key is test_row_0/A:col10/1733845141098/Put/seqid=0 2024-12-10T15:39:02,406 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f84cee47e13bc12ff2ef81f5e007a839#C#compaction#607 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:39:02,406 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/d1d2c304381b43158d72aa0fa5384fea is 50, key is test_row_0/C:col10/1733845141083/Put/seqid=0 2024-12-10T15:39:02,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742546_1722 (size=12454) 2024-12-10T15:39:02,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,423 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121005387282ea9541acadf906a9ad372f44_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121005387282ea9541acadf906a9ad372f44_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:02,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/ebb6fd702e754883978f284d40496bfe, store: [table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:39:02,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/ebb6fd702e754883978f284d40496bfe is 175, key is test_row_0/A:col10/1733845141098/Put/seqid=0 2024-12-10T15:39:02,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742547_1723 (size=13289) 2024-12-10T15:39:02,431 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/d1d2c304381b43158d72aa0fa5384fea as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/d1d2c304381b43158d72aa0fa5384fea 2024-12-10T15:39:02,435 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/C of f84cee47e13bc12ff2ef81f5e007a839 into d1d2c304381b43158d72aa0fa5384fea(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:39:02,435 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:39:02,435 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/C, priority=13, startTime=1733845142333; duration=0sec 2024-12-10T15:39:02,435 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:39:02,435 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:C 2024-12-10T15:39:02,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742548_1724 (size=31255) 2024-12-10T15:39:02,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-10T15:39:02,781 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/ee23cdd9422947ba9b6304eaa729de5e as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/ee23cdd9422947ba9b6304eaa729de5e 2024-12-10T15:39:02,784 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/B of f84cee47e13bc12ff2ef81f5e007a839 into ee23cdd9422947ba9b6304eaa729de5e(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
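Throughout this window the region flushes only tens of kilobytes per store and immediately triggers minor compactions, which is consistent with a deliberately tiny memstore: the 512 K blocking limit in the RegionTooBusyException messages is the flush size multiplied by the block multiplier. The snippet below is a hedged sketch of how such a small limit is commonly configured for a test; the exact values used by TestAcidGuarantees are an assumption here, chosen so that 4 x 128 K reproduces the 512 K limit seen above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfigExample {
  public static Configuration smallMemstoreConf() {
    Configuration conf = HBaseConfiguration.create();
    // Flush each memstore at ~128 KB instead of the 128 MB default (illustrative value).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Block writes once a memstore reaches multiplier x flush size; 4 x 128 K matches
    // the 512 K limit reported by RegionTooBusyException earlier in this log.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}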
2024-12-10T15:39:02,784 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:39:02,784 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/B, priority=13, startTime=1733845142332; duration=0sec 2024-12-10T15:39:02,784 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:39:02,784 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:B 2024-12-10T15:39:02,845 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=430, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/ebb6fd702e754883978f284d40496bfe 2024-12-10T15:39:02,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/e10a2184fd974f4785a95ed9e60e1bde is 50, key is test_row_0/B:col10/1733845141098/Put/seqid=0 2024-12-10T15:39:02,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742549_1725 (size=12301) 2024-12-10T15:39:02,853 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=430 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/e10a2184fd974f4785a95ed9e60e1bde 2024-12-10T15:39:02,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/dc8f8d937cbf4bdc9820be1c7332ff4a is 50, key is test_row_0/C:col10/1733845141098/Put/seqid=0 2024-12-10T15:39:02,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742550_1726 (size=12301) 2024-12-10T15:39:02,863 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=430 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/dc8f8d937cbf4bdc9820be1c7332ff4a 2024-12-10T15:39:02,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/ebb6fd702e754883978f284d40496bfe as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/ebb6fd702e754883978f284d40496bfe 2024-12-10T15:39:02,870 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/ebb6fd702e754883978f284d40496bfe, entries=150, sequenceid=430, filesize=30.5 K 2024-12-10T15:39:02,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/e10a2184fd974f4785a95ed9e60e1bde as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/e10a2184fd974f4785a95ed9e60e1bde 2024-12-10T15:39:02,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,874 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/e10a2184fd974f4785a95ed9e60e1bde, entries=150, sequenceid=430, filesize=12.0 K 2024-12-10T15:39:02,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,875 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/dc8f8d937cbf4bdc9820be1c7332ff4a as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/dc8f8d937cbf4bdc9820be1c7332ff4a 2024-12-10T15:39:02,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T15:39:02,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-10T15:39:02,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,879 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/dc8f8d937cbf4bdc9820be1c7332ff4a, entries=150, sequenceid=430, filesize=12.0 K 2024-12-10T15:39:02,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,879 INFO [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=0 B/0 for f84cee47e13bc12ff2ef81f5e007a839 in 508ms, sequenceid=430, compaction requested=false 2024-12-10T15:39:02,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2538): Flush status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:39:02,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 
{event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:39:02,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/bf0fec90ff6d:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-12-10T15:39:02,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster(4106): Remote procedure done, pid=176 2024-12-10T15:39:02,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,882 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=175 2024-12-10T15:39:02,882 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4330 sec 2024-12-10T15:39:02,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,883 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees in 1.4370 sec 2024-12-10T15:39:02,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the same DEBUG message "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" is repeated several hundred times between 2024-12-10T15:39:02,914 and 2024-12-10T15:39:02,959, emitted interleaved by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (queue=0, port=46239) ...]
2024-12-10T15:39:02,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:02,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[duplicate entries elided: the identical DEBUG message "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" is repeated by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (queue=0, port=46239) from 2024-12-10T15:39:02,987 through 2024-12-10T15:39:03,039]
2024-12-10T15:39:03,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[identical DEBUG entry from storefiletracker.StoreFileTrackerFactory(122) — "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" — repeated by RpcServer.default.FPBQ.Fifo handlers 0-2 (queue=0, port=46239) between 2024-12-10T15:39:03,199 and 2024-12-10T15:39:03,246]
2024-12-10T15:39:03,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,269 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f84cee47e13bc12ff2ef81f5e007a839 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-10T15:39:03,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=A 2024-12-10T15:39:03,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:39:03,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=B 2024-12-10T15:39:03,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:39:03,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=C 2024-12-10T15:39:03,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:39:03,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:03,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,277 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121068d666a62b22406398c2da89574e71e8_f84cee47e13bc12ff2ef81f5e007a839 is 50, key is test_row_0/A:col10/1733845143262/Put/seqid=0 2024-12-10T15:39:03,297 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:39:03,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39492 deadline: 1733845203295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:03,297 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:39:03,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39528 deadline: 1733845203295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:03,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742552_1728 (size=25158) 2024-12-10T15:39:03,300 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,300 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-10T15:39:03,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46239 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39518 deadline: 1733845203297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:03,303 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121068d666a62b22406398c2da89574e71e8_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121068d666a62b22406398c2da89574e71e8_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:03,304 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/c883f944aacd479e960ec4847cfd2650, store: [table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:39:03,304 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/c883f944aacd479e960ec4847cfd2650 is 175, key is test_row_0/A:col10/1733845143262/Put/seqid=0 2024-12-10T15:39:03,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742551_1727 (size=74795) 2024-12-10T15:39:03,308 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=445, memsize=20.1 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/c883f944aacd479e960ec4847cfd2650 2024-12-10T15:39:03,314 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/6865de368e154cdb82d75d4ecb78c2fe is 50, key is test_row_0/B:col10/1733845143262/Put/seqid=0 2024-12-10T15:39:03,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742553_1729 (size=12301) 2024-12-10T15:39:03,320 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=445 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/6865de368e154cdb82d75d4ecb78c2fe 2024-12-10T15:39:03,326 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/94293f70ec0f4c0d83b6d4907a560c6d is 50, key is test_row_0/C:col10/1733845143262/Put/seqid=0 2024-12-10T15:39:03,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742554_1730 (size=12301) 2024-12-10T15:39:03,343 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=445 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/94293f70ec0f4c0d83b6d4907a560c6d 2024-12-10T15:39:03,350 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/c883f944aacd479e960ec4847cfd2650 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/c883f944aacd479e960ec4847cfd2650 2024-12-10T15:39:03,354 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/c883f944aacd479e960ec4847cfd2650, entries=400, sequenceid=445, filesize=73.0 K 2024-12-10T15:39:03,355 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/6865de368e154cdb82d75d4ecb78c2fe as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/6865de368e154cdb82d75d4ecb78c2fe 2024-12-10T15:39:03,359 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/6865de368e154cdb82d75d4ecb78c2fe, entries=150, 
sequenceid=445, filesize=12.0 K 2024-12-10T15:39:03,359 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/94293f70ec0f4c0d83b6d4907a560c6d as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/94293f70ec0f4c0d83b6d4907a560c6d 2024-12-10T15:39:03,363 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/94293f70ec0f4c0d83b6d4907a560c6d, entries=150, sequenceid=445, filesize=12.0 K 2024-12-10T15:39:03,364 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for f84cee47e13bc12ff2ef81f5e007a839 in 96ms, sequenceid=445, compaction requested=true 2024-12-10T15:39:03,364 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:39:03,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:A, priority=-2147483648, current under compaction store size is 1 2024-12-10T15:39:03,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:39:03,364 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:39:03,364 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:39:03,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:B, priority=-2147483648, current under compaction store size is 2 2024-12-10T15:39:03,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:39:03,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f84cee47e13bc12ff2ef81f5e007a839:C, priority=-2147483648, current under compaction store size is 3 2024-12-10T15:39:03,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:39:03,367 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 138327 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:39:03,367 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/A is initiating minor compaction (all files) 2024-12-10T15:39:03,367 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/A in 
TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:39:03,367 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/32bf349d84044c6a82e8bfe665816841, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/ebb6fd702e754883978f284d40496bfe, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/c883f944aacd479e960ec4847cfd2650] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=135.1 K 2024-12-10T15:39:03,367 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:39:03,367 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. files: [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/32bf349d84044c6a82e8bfe665816841, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/ebb6fd702e754883978f284d40496bfe, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/c883f944aacd479e960ec4847cfd2650] 2024-12-10T15:39:03,369 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:39:03,369 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/B is initiating minor compaction (all files) 2024-12-10T15:39:03,369 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/B in TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
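
Note on the "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" entries above: a selection is kept only if every candidate store file passes HBase's size-ratio test, i.e. no file is larger than the configured ratio times the combined size of the other files in the selection. The following is a simplified, standalone sketch of that test, not the actual ExploringCompactionPolicy code; the class and method names and the 1.2 default ratio are assumptions for illustration, and the byte sizes are the rounded 13.0 K / 12.0 K / 12.0 K B-family files listed in the Compactor entries just below.

    import java.util.List;

    public class RatioCheck {
      // Simplified illustration (assumption): a selection is "in ratio" when every
      // file is no bigger than ratio * (total size of the other files in the selection).
      static boolean inRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
          if (size > ratio * (total - size)) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Rounded sizes of the three B-family files compacted below: 13.0 K, 12.0 K, 12.0 K.
        System.out.println(inRatio(List.of(13312L, 12288L, 12288L), 1.2)); // prints true
      }
    }

With these sizes every file is well under 1.2x the combined size of the other two, which is why the policy reports the whole group as being in ratio.
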
2024-12-10T15:39:03,369 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/ee23cdd9422947ba9b6304eaa729de5e, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/e10a2184fd974f4785a95ed9e60e1bde, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/6865de368e154cdb82d75d4ecb78c2fe] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=37.0 K 2024-12-10T15:39:03,369 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting 32bf349d84044c6a82e8bfe665816841, keycount=150, bloomtype=ROW, size=31.5 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1733845140976 2024-12-10T15:39:03,369 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting ee23cdd9422947ba9b6304eaa729de5e, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1733845140976 2024-12-10T15:39:03,370 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting ebb6fd702e754883978f284d40496bfe, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=430, earliestPutTs=1733845141098 2024-12-10T15:39:03,370 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting e10a2184fd974f4785a95ed9e60e1bde, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=430, earliestPutTs=1733845141098 2024-12-10T15:39:03,370 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] compactions.Compactor(224): Compacting c883f944aacd479e960ec4847cfd2650, keycount=400, bloomtype=ROW, size=73.0 K, encoding=NONE, compression=NONE, seqNum=445, earliestPutTs=1733845143243 2024-12-10T15:39:03,370 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 6865de368e154cdb82d75d4ecb78c2fe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=445, earliestPutTs=1733845143262 2024-12-10T15:39:03,372 DEBUG [Thread-2657 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x474dec36 to 127.0.0.1:56346 2024-12-10T15:39:03,372 DEBUG [Thread-2657 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:39:03,373 DEBUG [Thread-2659 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x68dbad25 to 127.0.0.1:56346 2024-12-10T15:39:03,373 DEBUG [Thread-2659 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:39:03,374 DEBUG [Thread-2655 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x67adb273 to 127.0.0.1:56346 2024-12-10T15:39:03,374 DEBUG [Thread-2655 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:39:03,375 DEBUG [Thread-2661 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6d2c412e to 127.0.0.1:56346 2024-12-10T15:39:03,375 DEBUG [Thread-2661 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:39:03,376 DEBUG [Thread-2653 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x410bf0c8 to 127.0.0.1:56346 2024-12-10T15:39:03,376 DEBUG 
[Thread-2653 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:39:03,381 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f84cee47e13bc12ff2ef81f5e007a839#B#compaction#613 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:39:03,381 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/89766e72e02d44ddb7a29ba2715eead2 is 50, key is test_row_0/B:col10/1733845143262/Put/seqid=0 2024-12-10T15:39:03,384 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:39:03,390 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241210119ec464b39e43579d44f14a21b5c23a_f84cee47e13bc12ff2ef81f5e007a839 store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:39:03,393 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241210119ec464b39e43579d44f14a21b5c23a_f84cee47e13bc12ff2ef81f5e007a839, store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:39:03,393 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210119ec464b39e43579d44f14a21b5c23a_f84cee47e13bc12ff2ef81f5e007a839 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:39:03,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742555_1731 (size=13425) 2024-12-10T15:39:03,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46239 {}] regionserver.HRegion(8581): Flush requested on f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:03,400 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f84cee47e13bc12ff2ef81f5e007a839 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-10T15:39:03,400 DEBUG [Thread-2650 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x68c2838a to 127.0.0.1:56346 2024-12-10T15:39:03,400 DEBUG [Thread-2648 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5cfdf76c to 127.0.0.1:56346 2024-12-10T15:39:03,400 DEBUG [Thread-2650 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:39:03,400 DEBUG [Thread-2648 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:39:03,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=A 2024-12-10T15:39:03,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:39:03,401 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=B 2024-12-10T15:39:03,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:39:03,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=C 2024-12-10T15:39:03,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:39:03,403 DEBUG [Thread-2646 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3a3b66d3 to 127.0.0.1:56346 2024-12-10T15:39:03,403 DEBUG [Thread-2646 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:39:03,405 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/89766e72e02d44ddb7a29ba2715eead2 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/89766e72e02d44ddb7a29ba2715eead2 2024-12-10T15:39:03,408 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/B of f84cee47e13bc12ff2ef81f5e007a839 into 89766e72e02d44ddb7a29ba2715eead2(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-10T15:39:03,408 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:39:03,408 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/B, priority=13, startTime=1733845143364; duration=0sec 2024-12-10T15:39:03,408 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-10T15:39:03,408 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:B 2024-12-10T15:39:03,408 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-10T15:39:03,408 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-10T15:39:03,408 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1540): f84cee47e13bc12ff2ef81f5e007a839/C is initiating minor compaction (all files) 2024-12-10T15:39:03,409 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f84cee47e13bc12ff2ef81f5e007a839/C in TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
2024-12-10T15:39:03,409 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/d1d2c304381b43158d72aa0fa5384fea, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/dc8f8d937cbf4bdc9820be1c7332ff4a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/94293f70ec0f4c0d83b6d4907a560c6d] into tmpdir=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp, totalSize=37.0 K 2024-12-10T15:39:03,409 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting d1d2c304381b43158d72aa0fa5384fea, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1733845140976 2024-12-10T15:39:03,409 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting dc8f8d937cbf4bdc9820be1c7332ff4a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=430, earliestPutTs=1733845141098 2024-12-10T15:39:03,409 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] compactions.Compactor(224): Compacting 94293f70ec0f4c0d83b6d4907a560c6d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=445, earliestPutTs=1733845143262 2024-12-10T15:39:03,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742556_1732 (size=4469) 2024-12-10T15:39:03,421 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f84cee47e13bc12ff2ef81f5e007a839#C#compaction#615 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:39:03,421 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/8c013c34840d4b4aab2f1167f6ccc663 is 50, key is test_row_0/C:col10/1733845143262/Put/seqid=0 2024-12-10T15:39:03,423 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f84cee47e13bc12ff2ef81f5e007a839#A#compaction#614 average throughput is 0.63 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-10T15:39:03,423 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/5dea1eaaf3fa4b889f102b0fad83c48c is 175, key is test_row_0/A:col10/1733845143262/Put/seqid=0 2024-12-10T15:39:03,431 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210bbcb23392dd9482789c16f6b657e07e1_f84cee47e13bc12ff2ef81f5e007a839 is 50, key is test_row_0/A:col10/1733845143296/Put/seqid=0 2024-12-10T15:39:03,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742557_1733 (size=13391) 2024-12-10T15:39:03,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742558_1734 (size=32379) 2024-12-10T15:39:03,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742559_1735 (size=12454) 2024-12-10T15:39:03,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-10T15:39:03,550 INFO [Thread-2652 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 175 completed 2024-12-10T15:39:03,841 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/8c013c34840d4b4aab2f1167f6ccc663 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/8c013c34840d4b4aab2f1167f6ccc663 2024-12-10T15:39:03,841 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:03,844 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/C of f84cee47e13bc12ff2ef81f5e007a839 into 8c013c34840d4b4aab2f1167f6ccc663(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
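
The "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 175 completed" entry above is the client side of an admin-requested flush finishing. A minimal sketch of issuing that kind of flush through the public HBase client API follows; it assumes an hbase-site.xml on the classpath that points at the cluster, and the class name FlushTestTable is made up for the example.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
      public static void main(String[] args) throws Exception {
        // Minimal sketch (assumes a reachable cluster configured via hbase-site.xml):
        // request the table flush whose completion is logged above as procId 175.
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

In recent HBase versions Admin.flush blocks until the master-side flush procedure completes, which matches the HBaseAdmin$TableFuture line only reporting "procId: 175 completed" after the memstores have been written out.
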
2024-12-10T15:39:03,844 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:39:03,844 INFO [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/C, priority=13, startTime=1733845143364; duration=0sec 2024-12-10T15:39:03,844 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:39:03,844 DEBUG [RS:0;bf0fec90ff6d:46239-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:C 2024-12-10T15:39:03,848 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241210bbcb23392dd9482789c16f6b657e07e1_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210bbcb23392dd9482789c16f6b657e07e1_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:03,849 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/5dea1eaaf3fa4b889f102b0fad83c48c as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/5dea1eaaf3fa4b889f102b0fad83c48c 2024-12-10T15:39:03,849 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/f20591073fc24ac8a6dc4a386b50ce8a, store: [table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:39:03,849 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/f20591073fc24ac8a6dc4a386b50ce8a is 175, key is test_row_0/A:col10/1733845143296/Put/seqid=0 2024-12-10T15:39:03,852 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f84cee47e13bc12ff2ef81f5e007a839/A of f84cee47e13bc12ff2ef81f5e007a839 into 5dea1eaaf3fa4b889f102b0fad83c48c(size=31.6 K), total size for store is 31.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-10T15:39:03,852 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:39:03,852 INFO [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839., storeName=f84cee47e13bc12ff2ef81f5e007a839/A, priority=13, startTime=1733845143364; duration=0sec 2024-12-10T15:39:03,852 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-10T15:39:03,852 DEBUG [RS:0;bf0fec90ff6d:46239-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f84cee47e13bc12ff2ef81f5e007a839:A 2024-12-10T15:39:03,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742560_1736 (size=31255) 2024-12-10T15:39:04,257 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=471, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/f20591073fc24ac8a6dc4a386b50ce8a 2024-12-10T15:39:04,262 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/310cb5e5b2c14069b68ddef04800374a is 50, key is test_row_0/B:col10/1733845143296/Put/seqid=0 2024-12-10T15:39:04,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742561_1737 (size=12301) 2024-12-10T15:39:04,665 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=471 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/310cb5e5b2c14069b68ddef04800374a 2024-12-10T15:39:04,671 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/5d9a7483a60143cf951b31eab3dad947 is 50, key is test_row_0/C:col10/1733845143296/Put/seqid=0 2024-12-10T15:39:04,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742562_1738 (size=12301) 2024-12-10T15:39:05,074 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=471 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/5d9a7483a60143cf951b31eab3dad947 2024-12-10T15:39:05,076 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/f20591073fc24ac8a6dc4a386b50ce8a as 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/f20591073fc24ac8a6dc4a386b50ce8a 2024-12-10T15:39:05,078 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/f20591073fc24ac8a6dc4a386b50ce8a, entries=150, sequenceid=471, filesize=30.5 K 2024-12-10T15:39:05,079 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/310cb5e5b2c14069b68ddef04800374a as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/310cb5e5b2c14069b68ddef04800374a 2024-12-10T15:39:05,081 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/310cb5e5b2c14069b68ddef04800374a, entries=150, sequenceid=471, filesize=12.0 K 2024-12-10T15:39:05,081 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/5d9a7483a60143cf951b31eab3dad947 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/5d9a7483a60143cf951b31eab3dad947 2024-12-10T15:39:05,083 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/5d9a7483a60143cf951b31eab3dad947, entries=150, sequenceid=471, filesize=12.0 K 2024-12-10T15:39:05,083 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=6.71 KB/6870 for f84cee47e13bc12ff2ef81f5e007a839 in 1683ms, sequenceid=471, compaction requested=false 2024-12-10T15:39:05,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:39:07,211 DEBUG [Thread-2644 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x150e08ed to 127.0.0.1:56346 2024-12-10T15:39:07,211 DEBUG [Thread-2644 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:39:08,940 DEBUG [Thread-2642 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3f6a59e4 to 127.0.0.1:56346 2024-12-10T15:39:08,940 DEBUG [Thread-2642 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:39:08,940 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-12-10T15:39:08,940 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 27
2024-12-10T15:39:08,940 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 26
2024-12-10T15:39:08,940 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 106
2024-12-10T15:39:08,940 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 111
2024-12-10T15:39:08,940 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 99
2024-12-10T15:39:08,940 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-12-10T15:39:08,940 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6155
2024-12-10T15:39:08,940 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6016
2024-12-10T15:39:08,940 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5965
2024-12-10T15:39:08,940 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6273
2024-12-10T15:39:08,940 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6089
2024-12-10T15:39:08,940 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-12-10T15:39:08,940 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-10T15:39:08,941 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5b914bf4 to 127.0.0.1:56346
2024-12-10T15:39:08,941 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-10T15:39:08,941 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-12-10T15:39:08,941 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-12-10T15:39:08,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=177, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-12-10T15:39:08,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177
2024-12-10T15:39:08,944 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733845148944"}]},"ts":"1733845148944"}
2024-12-10T15:39:08,944 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-12-10T15:39:08,996 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-12-10T15:39:08,996 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-12-10T15:39:08,997 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=179, ppid=178, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=f84cee47e13bc12ff2ef81f5e007a839, UNASSIGN}]
2024-12-10T15:39:08,997 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=179, ppid=178, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=f84cee47e13bc12ff2ef81f5e007a839, UNASSIGN
2024-12-10T15:39:08,998 INFO [PEWorker-2
{}] assignment.RegionStateStore(202): pid=179 updating hbase:meta row=f84cee47e13bc12ff2ef81f5e007a839, regionState=CLOSING, regionLocation=bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:08,998 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-10T15:39:08,999 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=180, ppid=179, state=RUNNABLE; CloseRegionProcedure f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049}] 2024-12-10T15:39:09,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-10T15:39:09,149 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:09,150 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] handler.UnassignRegionHandler(124): Close f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:09,150 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-10T15:39:09,150 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HRegion(1681): Closing f84cee47e13bc12ff2ef81f5e007a839, disabling compactions & flushes 2024-12-10T15:39:09,150 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:39:09,150 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:39:09,150 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. after waiting 0 ms 2024-12-10T15:39:09,150 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 
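
For completeness, the "Started disable of TestAcidGuarantees" / pid=177 sequence above corresponds to a single Admin call on the client. A minimal sketch follows, under the same connection assumptions as the flush sketch earlier; the class name DisableTestTable is made up, and the commented-out delete is only the typical cleanup step, not something this log shows.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTestTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          admin.disableTable(table);   // drives the DisableTableProcedure / region close seen above
          // admin.deleteTable(table); // usual next step during test teardown (assumption)
        }
      }
    }
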
2024-12-10T15:39:09,150 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HRegion(2837): Flushing f84cee47e13bc12ff2ef81f5e007a839 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-12-10T15:39:09,150 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=A 2024-12-10T15:39:09,150 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:39:09,150 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=B 2024-12-10T15:39:09,150 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:39:09,150 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f84cee47e13bc12ff2ef81f5e007a839, store=C 2024-12-10T15:39:09,150 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-10T15:39:09,154 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412100f8ace96a2ee4d2b91e5676d5613cdad_f84cee47e13bc12ff2ef81f5e007a839 is 50, key is test_row_0/A:col10/1733845147210/Put/seqid=0 2024-12-10T15:39:09,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742563_1739 (size=12454) 2024-12-10T15:39:09,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-10T15:39:09,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-10T15:39:09,562 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-10T15:39:09,565 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412100f8ace96a2ee4d2b91e5676d5613cdad_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412100f8ace96a2ee4d2b91e5676d5613cdad_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:09,566 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] mob.DefaultMobStoreFlusher(263): Flush store 
file: hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/9e3984c97d95476097a8a37713b79cce, store: [table=TestAcidGuarantees family=A region=f84cee47e13bc12ff2ef81f5e007a839] 2024-12-10T15:39:09,566 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/9e3984c97d95476097a8a37713b79cce is 175, key is test_row_0/A:col10/1733845147210/Put/seqid=0 2024-12-10T15:39:09,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742564_1740 (size=31255) 2024-12-10T15:39:09,973 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=480, memsize=6.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/9e3984c97d95476097a8a37713b79cce 2024-12-10T15:39:09,977 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/8d5d2b8581044b5f9b5d3c76f69106c7 is 50, key is test_row_0/B:col10/1733845147210/Put/seqid=0 2024-12-10T15:39:09,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742565_1741 (size=12301) 2024-12-10T15:39:10,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-10T15:39:10,390 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=480 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/8d5d2b8581044b5f9b5d3c76f69106c7 2024-12-10T15:39:10,395 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/7604acb66135479c8ee597c0097031b3 is 50, key is test_row_0/C:col10/1733845147210/Put/seqid=0 2024-12-10T15:39:10,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742566_1742 (size=12301) 2024-12-10T15:39:10,798 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=480 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/7604acb66135479c8ee597c0097031b3 2024-12-10T15:39:10,802 DEBUG 
[RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/A/9e3984c97d95476097a8a37713b79cce as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/9e3984c97d95476097a8a37713b79cce 2024-12-10T15:39:10,805 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/9e3984c97d95476097a8a37713b79cce, entries=150, sequenceid=480, filesize=30.5 K 2024-12-10T15:39:10,805 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/B/8d5d2b8581044b5f9b5d3c76f69106c7 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/8d5d2b8581044b5f9b5d3c76f69106c7 2024-12-10T15:39:10,809 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/8d5d2b8581044b5f9b5d3c76f69106c7, entries=150, sequenceid=480, filesize=12.0 K 2024-12-10T15:39:10,809 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/.tmp/C/7604acb66135479c8ee597c0097031b3 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/7604acb66135479c8ee597c0097031b3 2024-12-10T15:39:10,813 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/7604acb66135479c8ee597c0097031b3, entries=150, sequenceid=480, filesize=12.0 K 2024-12-10T15:39:10,814 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for f84cee47e13bc12ff2ef81f5e007a839 in 1664ms, sequenceid=480, compaction requested=true 2024-12-10T15:39:10,814 DEBUG [StoreCloser-TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/7badb19800024adfae93fa05b0c23d5f, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/654d6100dad64c10aac35dfcc59a98b3, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/cff3e4b0f06f49a089f4c929f49fd9a2, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/ef81c4c8c67e468f988e60b72224d1be, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/7c21663bf95e43048b551c259743140d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/52f5bbb2d3d6432f963289f4dcef416a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/08e06894b71444df982a8eae72d890cb, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/855874f31f35486184acc51575fd8e2f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/b016ec6dc741418ab42e9c52411e19e8, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/1bdb2fcc2e674ae69d2424cccca3068d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/51169a5260ff458c87685887e99624d0, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/649184c841e048eeaf32f7766d756c71, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/cd5028483e0f4f56baa34a9ec3f27614, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/80decb51f3ee455c80dcd1ab5e17a24b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/ac1a895025c4462ba8b75688b419abb6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/269ca785173342e48780004d4f0ddaea, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/f6b7d770ec3549d4a30a650d4753e350, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/829f1dafc107466eb0680428b2944d0a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/b4fe46727bd34ab3960095f8498bcd03, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/55d22c14640149848a7a1311b882a3c6, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/e6d4062f06014dea9fcf882cce368eb6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/95fc47fdd28f4913a7afdb2af003aea4, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/829a0afe63044f9d8017c28e744ba8c9, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/3ecb3df8c428473dbb1c9cadb4ff3c4e, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/a5bc7e6a41f6402b8a16fec7ffc5e27a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/922a9783e8ab4f44b96fbeda639d578a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/db7d7d2d47c34bef80090f59f4d99745, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/5fa93247215e4133bdf7f0b3a1591cf2, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/6b85e6e1beed4e8c974d54fc632491b2, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/0aa379dfe7ac4f6eb72de92b7cd495b2, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/32bf349d84044c6a82e8bfe665816841, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/ebb6fd702e754883978f284d40496bfe, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/c883f944aacd479e960ec4847cfd2650] to archive 2024-12-10T15:39:10,815 DEBUG [StoreCloser-TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
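
The HFileArchiver entries that follow all apply the same path rewrite: a compacted store file under <hbase root>/data/<namespace>/<table>/<region>/<family>/ is moved to <hbase root>/archive/data/<namespace>/<table>/<region>/<family>/. The helper below is purely illustrative (not an HBase API); it only encodes the convention visible in those entries.

    public class ArchivePath {
      // Hypothetical helper (illustration only): derive the archive location that the
      // HFileArchiver entries below move each compacted store file to, by replacing the
      // first "/data/" segment under the HBase root dir with "/archive/data/".
      static String toArchivePath(String storeFilePath) {
        return storeFilePath.replaceFirst("/data/", "/archive/data/");
      }

      public static void main(String[] args) {
        String src = "hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935"
            + "/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/7badb19800024adfae93fa05b0c23d5f";
        // Prints the matching .../archive/data/default/TestAcidGuarantees/... destination.
        System.out.println(toArchivePath(src));
      }
    }
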
2024-12-10T15:39:10,828 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/7badb19800024adfae93fa05b0c23d5f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/7badb19800024adfae93fa05b0c23d5f 2024-12-10T15:39:10,828 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/cff3e4b0f06f49a089f4c929f49fd9a2 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/cff3e4b0f06f49a089f4c929f49fd9a2 2024-12-10T15:39:10,828 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/654d6100dad64c10aac35dfcc59a98b3 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/654d6100dad64c10aac35dfcc59a98b3 2024-12-10T15:39:10,829 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/52f5bbb2d3d6432f963289f4dcef416a to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/52f5bbb2d3d6432f963289f4dcef416a 2024-12-10T15:39:10,830 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/ef81c4c8c67e468f988e60b72224d1be to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/ef81c4c8c67e468f988e60b72224d1be 2024-12-10T15:39:10,830 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/7c21663bf95e43048b551c259743140d to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/7c21663bf95e43048b551c259743140d 2024-12-10T15:39:10,836 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/b016ec6dc741418ab42e9c52411e19e8 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/b016ec6dc741418ab42e9c52411e19e8 2024-12-10T15:39:10,836 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/855874f31f35486184acc51575fd8e2f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/855874f31f35486184acc51575fd8e2f 2024-12-10T15:39:10,836 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/51169a5260ff458c87685887e99624d0 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/51169a5260ff458c87685887e99624d0 2024-12-10T15:39:10,836 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/08e06894b71444df982a8eae72d890cb to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/08e06894b71444df982a8eae72d890cb 2024-12-10T15:39:10,837 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/649184c841e048eeaf32f7766d756c71 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/649184c841e048eeaf32f7766d756c71 2024-12-10T15:39:10,837 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/80decb51f3ee455c80dcd1ab5e17a24b to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/80decb51f3ee455c80dcd1ab5e17a24b 2024-12-10T15:39:10,837 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/cd5028483e0f4f56baa34a9ec3f27614 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/cd5028483e0f4f56baa34a9ec3f27614 2024-12-10T15:39:10,838 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/ac1a895025c4462ba8b75688b419abb6 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/ac1a895025c4462ba8b75688b419abb6 2024-12-10T15:39:10,838 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/f6b7d770ec3549d4a30a650d4753e350 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/f6b7d770ec3549d4a30a650d4753e350 2024-12-10T15:39:10,839 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/1bdb2fcc2e674ae69d2424cccca3068d to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/1bdb2fcc2e674ae69d2424cccca3068d 2024-12-10T15:39:10,840 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/e6d4062f06014dea9fcf882cce368eb6 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/e6d4062f06014dea9fcf882cce368eb6 2024-12-10T15:39:10,840 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/55d22c14640149848a7a1311b882a3c6 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/55d22c14640149848a7a1311b882a3c6 2024-12-10T15:39:10,840 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/269ca785173342e48780004d4f0ddaea to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/269ca785173342e48780004d4f0ddaea 2024-12-10T15:39:10,840 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/829f1dafc107466eb0680428b2944d0a to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/829f1dafc107466eb0680428b2944d0a 2024-12-10T15:39:10,840 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/95fc47fdd28f4913a7afdb2af003aea4 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/95fc47fdd28f4913a7afdb2af003aea4 2024-12-10T15:39:10,841 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/829a0afe63044f9d8017c28e744ba8c9 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/829a0afe63044f9d8017c28e744ba8c9 2024-12-10T15:39:10,842 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/b4fe46727bd34ab3960095f8498bcd03 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/b4fe46727bd34ab3960095f8498bcd03 2024-12-10T15:39:10,842 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/922a9783e8ab4f44b96fbeda639d578a to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/922a9783e8ab4f44b96fbeda639d578a 2024-12-10T15:39:10,842 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/a5bc7e6a41f6402b8a16fec7ffc5e27a to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/a5bc7e6a41f6402b8a16fec7ffc5e27a 2024-12-10T15:39:10,843 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/6b85e6e1beed4e8c974d54fc632491b2 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/6b85e6e1beed4e8c974d54fc632491b2 2024-12-10T15:39:10,843 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/0aa379dfe7ac4f6eb72de92b7cd495b2 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/0aa379dfe7ac4f6eb72de92b7cd495b2 2024-12-10T15:39:10,843 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/32bf349d84044c6a82e8bfe665816841 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/32bf349d84044c6a82e8bfe665816841 2024-12-10T15:39:10,844 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/ebb6fd702e754883978f284d40496bfe to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/ebb6fd702e754883978f284d40496bfe 2024-12-10T15:39:10,844 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/5fa93247215e4133bdf7f0b3a1591cf2 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/5fa93247215e4133bdf7f0b3a1591cf2 2024-12-10T15:39:10,844 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/c883f944aacd479e960ec4847cfd2650 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/c883f944aacd479e960ec4847cfd2650 2024-12-10T15:39:10,851 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/3ecb3df8c428473dbb1c9cadb4ff3c4e to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/3ecb3df8c428473dbb1c9cadb4ff3c4e 2024-12-10T15:39:10,851 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/db7d7d2d47c34bef80090f59f4d99745 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/db7d7d2d47c34bef80090f59f4d99745 2024-12-10T15:39:10,853 DEBUG [StoreCloser-TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/53d5323b2562406886107489bb3b13b6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/c14c1272a7d74b268841fa14e95424ea, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/eb29c0d0de044acea9230e34315cda96, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/988862916eae4ab08128397d0a35a658, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/39865f468c014c5cbef1def540a2eab9, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/74e9ac07f15c4479b8629d350fc9b5dd, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/8f960cdbaafb4bae809d02cfc1f30d82, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/9cc2e9e6d5c149d197ca0f1f1019f525, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/8ed75e686dab4566aa8e2945681c64b4, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/6999056375af498ca477bda998c29996, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/3adc69728e5b4cf3a81c995ba00efa09, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/1dc70abfa7244ae7878167fb2f430f80, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/88cbfbb3240f4b7bba2b48be9b02951b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/a2b60b1eb3394124abf6146d4235dffc, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/a1fd7a137e3b413fb5e98362bd79e313, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/6a23261a6e204611a6a333736a9b8b0c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/ba497316a0a3437b81e4d3edfbfdbb70, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/f2ca0a693b6d4e0080b14fcdde38452c, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/3413358f132c49589fad32031b166eae, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/8ca8dbb97d834dc49b7b2e673329a10b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/67e2769706fd4f0bb1d6e09eeffb947b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/0372762de69f4d3ea7f6efbc8f0fcd44, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/1800042451ce441c9d732adf31bfde01, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/ba3cf8793641468f8b2272359573f17e, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/17ce487f888e467d88724699d3c5e5f2, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/57816faaaa564c6ba1435076e6f091b6, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/e2a4569c3ddd4f738b55afa96c0380d3, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/02082fd1eb74411c8090eef2f9227c81, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/2530d30aed3b4f52ae7dfcd818554c7d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/ee23cdd9422947ba9b6304eaa729de5e, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/606c12ebe50749e0833e56ba5ed24a55, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/e10a2184fd974f4785a95ed9e60e1bde, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/6865de368e154cdb82d75d4ecb78c2fe] to archive 2024-12-10T15:39:10,854 DEBUG [StoreCloser-TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
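[editorial illustration] The HFileArchiver entries above and below record store files being moved from the table's data directory into a parallel layout under .../archive/data/... . As a rough sketch of that path mapping only (not the actual HFileArchiver implementation), the following standalone Java snippet re-roots a store-file path the way these log lines show; the helper name toArchivePath is hypothetical, and the example paths are copied from the entries in this log.

// Illustrative only: maps .../data/<ns>/<table>/<region>/<cf>/<file>
// to the parallel .../archive/data/<ns>/<table>/<region>/<cf>/<file> layout seen above.
// "toArchivePath" is a made-up helper, not an HBase API.
public final class ArchivePathSketch {

    static String toArchivePath(String rootDir, String storeFilePath) {
        String dataPrefix = rootDir + "/data/";
        if (!storeFilePath.startsWith(dataPrefix)) {
            throw new IllegalArgumentException("not under " + dataPrefix + ": " + storeFilePath);
        }
        // Keep the namespace/table/region/family/file suffix and re-root it under archive/data/.
        String suffix = storeFilePath.substring(dataPrefix.length());
        return rootDir + "/archive/data/" + suffix;
    }

    public static void main(String[] args) {
        String root = "hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935";
        String src = root + "/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/53d5323b2562406886107489bb3b13b6";
        // Prints the same destination that the corresponding "Archived from FileableStoreFile" entry reports.
        System.out.println(toArchivePath(root, src));
    }
}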
2024-12-10T15:39:10,856 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/eb29c0d0de044acea9230e34315cda96 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/eb29c0d0de044acea9230e34315cda96 2024-12-10T15:39:10,856 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/53d5323b2562406886107489bb3b13b6 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/53d5323b2562406886107489bb3b13b6 2024-12-10T15:39:10,856 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/988862916eae4ab08128397d0a35a658 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/988862916eae4ab08128397d0a35a658 2024-12-10T15:39:10,856 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/c14c1272a7d74b268841fa14e95424ea to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/c14c1272a7d74b268841fa14e95424ea 2024-12-10T15:39:10,858 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/8f960cdbaafb4bae809d02cfc1f30d82 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/8f960cdbaafb4bae809d02cfc1f30d82 2024-12-10T15:39:10,858 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/74e9ac07f15c4479b8629d350fc9b5dd to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/74e9ac07f15c4479b8629d350fc9b5dd 2024-12-10T15:39:10,858 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/39865f468c014c5cbef1def540a2eab9 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/39865f468c014c5cbef1def540a2eab9 2024-12-10T15:39:10,863 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/6999056375af498ca477bda998c29996 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/6999056375af498ca477bda998c29996 2024-12-10T15:39:10,863 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/9cc2e9e6d5c149d197ca0f1f1019f525 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/9cc2e9e6d5c149d197ca0f1f1019f525 2024-12-10T15:39:10,868 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/8ed75e686dab4566aa8e2945681c64b4 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/8ed75e686dab4566aa8e2945681c64b4 2024-12-10T15:39:10,879 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/1dc70abfa7244ae7878167fb2f430f80 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/1dc70abfa7244ae7878167fb2f430f80 2024-12-10T15:39:10,879 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/3adc69728e5b4cf3a81c995ba00efa09 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/3adc69728e5b4cf3a81c995ba00efa09 2024-12-10T15:39:10,883 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/88cbfbb3240f4b7bba2b48be9b02951b to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/88cbfbb3240f4b7bba2b48be9b02951b 2024-12-10T15:39:10,883 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/ba497316a0a3437b81e4d3edfbfdbb70 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/ba497316a0a3437b81e4d3edfbfdbb70 2024-12-10T15:39:10,883 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/6a23261a6e204611a6a333736a9b8b0c to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/6a23261a6e204611a6a333736a9b8b0c 2024-12-10T15:39:10,887 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/a2b60b1eb3394124abf6146d4235dffc to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/a2b60b1eb3394124abf6146d4235dffc 2024-12-10T15:39:10,887 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/f2ca0a693b6d4e0080b14fcdde38452c to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/f2ca0a693b6d4e0080b14fcdde38452c 2024-12-10T15:39:10,888 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/3413358f132c49589fad32031b166eae to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/3413358f132c49589fad32031b166eae 2024-12-10T15:39:10,888 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/a1fd7a137e3b413fb5e98362bd79e313 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/a1fd7a137e3b413fb5e98362bd79e313 2024-12-10T15:39:10,889 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/8ca8dbb97d834dc49b7b2e673329a10b to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/8ca8dbb97d834dc49b7b2e673329a10b 2024-12-10T15:39:10,889 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/1800042451ce441c9d732adf31bfde01 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/1800042451ce441c9d732adf31bfde01 2024-12-10T15:39:10,890 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/ba3cf8793641468f8b2272359573f17e to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/ba3cf8793641468f8b2272359573f17e 2024-12-10T15:39:10,890 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/57816faaaa564c6ba1435076e6f091b6 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/57816faaaa564c6ba1435076e6f091b6 2024-12-10T15:39:10,890 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/17ce487f888e467d88724699d3c5e5f2 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/17ce487f888e467d88724699d3c5e5f2 2024-12-10T15:39:10,892 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/02082fd1eb74411c8090eef2f9227c81 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/02082fd1eb74411c8090eef2f9227c81 2024-12-10T15:39:10,892 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/2530d30aed3b4f52ae7dfcd818554c7d to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/2530d30aed3b4f52ae7dfcd818554c7d 2024-12-10T15:39:10,892 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/e2a4569c3ddd4f738b55afa96c0380d3 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/e2a4569c3ddd4f738b55afa96c0380d3 2024-12-10T15:39:10,895 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/0372762de69f4d3ea7f6efbc8f0fcd44 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/0372762de69f4d3ea7f6efbc8f0fcd44 2024-12-10T15:39:10,896 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/606c12ebe50749e0833e56ba5ed24a55 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/606c12ebe50749e0833e56ba5ed24a55 2024-12-10T15:39:10,896 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/ee23cdd9422947ba9b6304eaa729de5e to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/ee23cdd9422947ba9b6304eaa729de5e 2024-12-10T15:39:10,899 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/67e2769706fd4f0bb1d6e09eeffb947b to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/67e2769706fd4f0bb1d6e09eeffb947b 2024-12-10T15:39:10,909 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/6865de368e154cdb82d75d4ecb78c2fe to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/6865de368e154cdb82d75d4ecb78c2fe 2024-12-10T15:39:10,911 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/e10a2184fd974f4785a95ed9e60e1bde to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/e10a2184fd974f4785a95ed9e60e1bde 2024-12-10T15:39:10,913 DEBUG [StoreCloser-TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/d7b133ee4e604a9caf2ec8ecfa484810, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/6c07267402a342c1aeebaca699c81814, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/c27ee34a0a084bfaa514c1262f48519d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/e02de74377be4919ab63dce3a6e53a4e, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/f6d8cf2c8fb54c648e6b3695d469f1f7, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/7fd382259201484f9a89e568e5184233, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/362f02823428483088891f34749c86f0, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/cc1e8a5d1a424cc59adfc89ff897072d, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/1cb6e26af32c4f7cb6df852a6f0494ac, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/0de6c751d804422d8035ebb7869c7520, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/e87df0ff98034ac39836791624754c89, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/940fc0445ead4740b88392a50cb2722f, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/0b7beb0246fd46fe9f9c33213de15803, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/2bbd6ec28735420a90144c45deeadd03, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/c9b499b7e4d54b1b882422a4b51720a0, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/52637347331341e98b9f7c074c8bf1c2, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/f3aa33eec8fd47d891de68191b2b9e48, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/8d501055a40c44eeac71a0065c3ee630, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/6a39ce8c502f46eaaaeee675d1909c27, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/a2f4332139414f69877fcc31699fb821, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/3182a33732f047938f8d2fc159182788, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/507d2bf6bc7a46d3972705551831e833, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/46d78757d5f94fc8a862118d018b544b, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/c78a8f7e97db4dc490fd29ad7fbfa7d0, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/e04f1a57a85e428aab5b739637146bf2, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/3c60dd38c6d54023ab5393c8a868d0ab, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/4d2b7236b6c641cdb0a7e924506227bc, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/663cf957c9c7450fa6716eb0fac0ffce, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/d1d2c304381b43158d72aa0fa5384fea, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/db91c748317e4555bc906a004d0ff3f7, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/dc8f8d937cbf4bdc9820be1c7332ff4a, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/94293f70ec0f4c0d83b6d4907a560c6d] to archive 2024-12-10T15:39:10,915 DEBUG [StoreCloser-TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-10T15:39:10,925 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/f6d8cf2c8fb54c648e6b3695d469f1f7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/f6d8cf2c8fb54c648e6b3695d469f1f7 2024-12-10T15:39:10,926 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/d7b133ee4e604a9caf2ec8ecfa484810 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/d7b133ee4e604a9caf2ec8ecfa484810 2024-12-10T15:39:10,926 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/e02de74377be4919ab63dce3a6e53a4e to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/e02de74377be4919ab63dce3a6e53a4e 2024-12-10T15:39:10,926 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/362f02823428483088891f34749c86f0 to 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/362f02823428483088891f34749c86f0 2024-12-10T15:39:10,926 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/c27ee34a0a084bfaa514c1262f48519d to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/c27ee34a0a084bfaa514c1262f48519d 2024-12-10T15:39:10,926 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/6c07267402a342c1aeebaca699c81814 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/6c07267402a342c1aeebaca699c81814 2024-12-10T15:39:10,927 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/cc1e8a5d1a424cc59adfc89ff897072d to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/cc1e8a5d1a424cc59adfc89ff897072d 2024-12-10T15:39:10,929 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/0de6c751d804422d8035ebb7869c7520 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/0de6c751d804422d8035ebb7869c7520 2024-12-10T15:39:10,929 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/e87df0ff98034ac39836791624754c89 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/e87df0ff98034ac39836791624754c89 2024-12-10T15:39:10,929 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/940fc0445ead4740b88392a50cb2722f to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/940fc0445ead4740b88392a50cb2722f 2024-12-10T15:39:10,930 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/1cb6e26af32c4f7cb6df852a6f0494ac to 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/1cb6e26af32c4f7cb6df852a6f0494ac 2024-12-10T15:39:10,930 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/2bbd6ec28735420a90144c45deeadd03 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/2bbd6ec28735420a90144c45deeadd03 2024-12-10T15:39:10,931 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/0b7beb0246fd46fe9f9c33213de15803 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/0b7beb0246fd46fe9f9c33213de15803 2024-12-10T15:39:10,932 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/52637347331341e98b9f7c074c8bf1c2 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/52637347331341e98b9f7c074c8bf1c2 2024-12-10T15:39:10,932 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/f3aa33eec8fd47d891de68191b2b9e48 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/f3aa33eec8fd47d891de68191b2b9e48 2024-12-10T15:39:10,932 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/8d501055a40c44eeac71a0065c3ee630 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/8d501055a40c44eeac71a0065c3ee630 2024-12-10T15:39:10,932 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/a2f4332139414f69877fcc31699fb821 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/a2f4332139414f69877fcc31699fb821 2024-12-10T15:39:10,933 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/7fd382259201484f9a89e568e5184233 to 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/7fd382259201484f9a89e568e5184233 2024-12-10T15:39:10,934 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/6a39ce8c502f46eaaaeee675d1909c27 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/6a39ce8c502f46eaaaeee675d1909c27 2024-12-10T15:39:10,934 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/3182a33732f047938f8d2fc159182788 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/3182a33732f047938f8d2fc159182788 2024-12-10T15:39:10,936 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/c78a8f7e97db4dc490fd29ad7fbfa7d0 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/c78a8f7e97db4dc490fd29ad7fbfa7d0 2024-12-10T15:39:10,936 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/46d78757d5f94fc8a862118d018b544b to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/46d78757d5f94fc8a862118d018b544b 2024-12-10T15:39:10,936 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/507d2bf6bc7a46d3972705551831e833 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/507d2bf6bc7a46d3972705551831e833 2024-12-10T15:39:10,937 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/3c60dd38c6d54023ab5393c8a868d0ab to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/3c60dd38c6d54023ab5393c8a868d0ab 2024-12-10T15:39:10,937 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/e04f1a57a85e428aab5b739637146bf2 to 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/e04f1a57a85e428aab5b739637146bf2 2024-12-10T15:39:10,937 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/c9b499b7e4d54b1b882422a4b51720a0 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/c9b499b7e4d54b1b882422a4b51720a0 2024-12-10T15:39:10,938 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/663cf957c9c7450fa6716eb0fac0ffce to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/663cf957c9c7450fa6716eb0fac0ffce 2024-12-10T15:39:10,938 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/4d2b7236b6c641cdb0a7e924506227bc to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/4d2b7236b6c641cdb0a7e924506227bc 2024-12-10T15:39:10,939 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/db91c748317e4555bc906a004d0ff3f7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/db91c748317e4555bc906a004d0ff3f7 2024-12-10T15:39:10,940 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/d1d2c304381b43158d72aa0fa5384fea to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/d1d2c304381b43158d72aa0fa5384fea 2024-12-10T15:39:10,941 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/94293f70ec0f4c0d83b6d4907a560c6d to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/94293f70ec0f4c0d83b6d4907a560c6d 2024-12-10T15:39:10,941 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/dc8f8d937cbf4bdc9820be1c7332ff4a to 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/dc8f8d937cbf4bdc9820be1c7332ff4a 2024-12-10T15:39:10,947 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/recovered.edits/483.seqid, newMaxSeqId=483, maxSeqId=4 2024-12-10T15:39:10,947 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839. 2024-12-10T15:39:10,947 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HRegion(1635): Region close journal for f84cee47e13bc12ff2ef81f5e007a839: 2024-12-10T15:39:10,948 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] handler.UnassignRegionHandler(170): Closed f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:10,949 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=179 updating hbase:meta row=f84cee47e13bc12ff2ef81f5e007a839, regionState=CLOSED 2024-12-10T15:39:10,954 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=180, resume processing ppid=179 2024-12-10T15:39:10,954 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, ppid=179, state=SUCCESS; CloseRegionProcedure f84cee47e13bc12ff2ef81f5e007a839, server=bf0fec90ff6d,46239,1733844953049 in 1.9540 sec 2024-12-10T15:39:10,955 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=179, resume processing ppid=178 2024-12-10T15:39:10,955 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, ppid=178, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=f84cee47e13bc12ff2ef81f5e007a839, UNASSIGN in 1.9570 sec 2024-12-10T15:39:10,955 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=177 2024-12-10T15:39:10,955 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=177, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9590 sec 2024-12-10T15:39:10,956 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733845150956"}]},"ts":"1733845150956"} 2024-12-10T15:39:10,957 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-10T15:39:10,996 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-10T15:39:10,997 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.0540 sec 2024-12-10T15:39:11,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-10T15:39:11,047 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 177 completed 2024-12-10T15:39:11,047 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.HMaster$5(2505): 
Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-10T15:39:11,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] procedure2.ProcedureExecutor(1098): Stored pid=181, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:39:11,048 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=181, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:39:11,049 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=181, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:39:11,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-10T15:39:11,058 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:11,064 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A, FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B, FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C, FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/recovered.edits] 2024-12-10T15:39:11,071 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/f20591073fc24ac8a6dc4a386b50ce8a to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/f20591073fc24ac8a6dc4a386b50ce8a 2024-12-10T15:39:11,071 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/9e3984c97d95476097a8a37713b79cce to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/9e3984c97d95476097a8a37713b79cce 2024-12-10T15:39:11,071 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/5dea1eaaf3fa4b889f102b0fad83c48c to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/A/5dea1eaaf3fa4b889f102b0fad83c48c 2024-12-10T15:39:11,073 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/310cb5e5b2c14069b68ddef04800374a to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/310cb5e5b2c14069b68ddef04800374a 2024-12-10T15:39:11,073 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/89766e72e02d44ddb7a29ba2715eead2 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/89766e72e02d44ddb7a29ba2715eead2 2024-12-10T15:39:11,073 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/8d5d2b8581044b5f9b5d3c76f69106c7 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/B/8d5d2b8581044b5f9b5d3c76f69106c7 2024-12-10T15:39:11,075 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/7604acb66135479c8ee597c0097031b3 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/7604acb66135479c8ee597c0097031b3 2024-12-10T15:39:11,075 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/5d9a7483a60143cf951b31eab3dad947 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/5d9a7483a60143cf951b31eab3dad947 2024-12-10T15:39:11,075 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/8c013c34840d4b4aab2f1167f6ccc663 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/C/8c013c34840d4b4aab2f1167f6ccc663 2024-12-10T15:39:11,077 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/recovered.edits/483.seqid to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839/recovered.edits/483.seqid 2024-12-10T15:39:11,078 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/default/TestAcidGuarantees/f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:11,078 DEBUG [PEWorker-2 {}] 
procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-10T15:39:11,079 DEBUG [PEWorker-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-10T15:39:11,080 DEBUG [PEWorker-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-10T15:39:11,090 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121005387282ea9541acadf906a9ad372f44_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121005387282ea9541acadf906a9ad372f44_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:11,090 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121009c0c4e39d57484b82512dbe403dc29a_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121009c0c4e39d57484b82512dbe403dc29a_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:11,090 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412100f8ace96a2ee4d2b91e5676d5613cdad_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412100f8ace96a2ee4d2b91e5676d5613cdad_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:11,090 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412101ba8e2acd1944368a85e6a5b9504c2a8_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412101ba8e2acd1944368a85e6a5b9504c2a8_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:11,090 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210267f6c83d2b840b5a93ac96c66bef635_f84cee47e13bc12ff2ef81f5e007a839 to 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210267f6c83d2b840b5a93ac96c66bef635_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:11,091 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121020f81acb61794f5289f4c08eaea0418c_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121020f81acb61794f5289f4c08eaea0418c_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:11,091 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412104b06c8968cdf4a4e9017d79629577a10_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412104b06c8968cdf4a4e9017d79629577a10_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:11,091 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412105a97a754dcfc4c899a4ad331fdef6cbe_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412105a97a754dcfc4c899a4ad331fdef6cbe_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:11,092 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412105b6b67a0f7cf43d495235d66a2c63615_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412105b6b67a0f7cf43d495235d66a2c63615_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:11,093 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121068d666a62b22406398c2da89574e71e8_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121068d666a62b22406398c2da89574e71e8_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:11,093 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from 
FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210744bd2868b1049829916303cb925e665_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210744bd2868b1049829916303cb925e665_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:11,093 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412107154ae7ca74b4380ae9cb8248d2b4f9c_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412107154ae7ca74b4380ae9cb8248d2b4f9c_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:11,093 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121080242a90f001489a9b17283ff69beef1_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121080242a90f001489a9b17283ff69beef1_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:11,093 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210801ce79456024b86b5930ce5fb18bdcf_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210801ce79456024b86b5930ce5fb18bdcf_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:11,094 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210804eb66ba28843598c6feaa48ac48718_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210804eb66ba28843598c6feaa48ac48718_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:11,094 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108c7402783c4d444db4b004d4a47e4b88_f84cee47e13bc12ff2ef81f5e007a839 to 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412108c7402783c4d444db4b004d4a47e4b88_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:11,095 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121095457f0e04ee4a21b098fa3458f00e00_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121095457f0e04ee4a21b098fa3458f00e00_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:11,095 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412109c720b677fa7456b9d688fdfe0a8b476_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412109c720b677fa7456b9d688fdfe0a8b476_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:11,095 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210a4b91f19da294093bbb79ab401d8110b_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210a4b91f19da294093bbb79ab401d8110b_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:11,095 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210aec1f45609ec4cc9846258854b97c164_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210aec1f45609ec4cc9846258854b97c164_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:11,096 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210bb21ac861013473f84d275f3fccc973c_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210bb21ac861013473f84d275f3fccc973c_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:11,096 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from 
FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210bbcb23392dd9482789c16f6b657e07e1_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210bbcb23392dd9482789c16f6b657e07e1_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:11,096 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210ce5b56b8979d4789a2972d1bf0c3fd2f_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210ce5b56b8979d4789a2972d1bf0c3fd2f_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:11,096 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210c9ef9246e8ee46eebb7af1ebabd2c8cc_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210c9ef9246e8ee46eebb7af1ebabd2c8cc_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:11,096 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210d66dfad9002a42ff8c8edac40b07ad02_f84cee47e13bc12ff2ef81f5e007a839 to hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241210d66dfad9002a42ff8c8edac40b07ad02_f84cee47e13bc12ff2ef81f5e007a839 2024-12-10T15:39:11,097 DEBUG [PEWorker-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-10T15:39:11,100 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=181, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:39:11,102 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-10T15:39:11,104 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-10T15:39:11,104 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=181, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:39:11,104 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 
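Note on the HFileArchiver entries above: dropping TestAcidGuarantees does not delete store files outright. Each file under data/default/TestAcidGuarantees/... (including the mob files under mobdir/) is moved to a mirrored path under archive/, and only the emptied region directory is deleted afterwards. A minimal sketch of that move pattern with the plain Hadoop FileSystem API follows; the paths and the archiveFile helper are illustrative assumptions, not the actual HFileArchiver implementation.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ArchiveSketch {
      // Hypothetical helper: mirror a store file from the data dir into the archive dir.
      static void archiveFile(FileSystem fs, Path rootDir, Path storeFile) throws IOException {
        // e.g. <root>/data/default/TestAcidGuarantees/<region>/C/<hfile>
        //  ->  <root>/archive/data/default/TestAcidGuarantees/<region>/C/<hfile>
        String relative = storeFile.toUri().getPath()
            .substring(rootDir.toUri().getPath().length() + 1);
        Path archived = new Path(new Path(rootDir, "archive"), relative);
        fs.mkdirs(archived.getParent());          // create the mirrored directory layout
        if (!fs.rename(storeFile, archived)) {    // move, do not copy+delete
          throw new IOException("Failed to archive " + storeFile + " to " + archived);
        }
      }

      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Illustrative root; a real run needs an actual HDFS (the log uses localhost:41507).
        Path rootDir = new Path("hdfs://localhost:41507/user/jenkins/test-data/demo");
        FileSystem fs = rootDir.getFileSystem(conf);
        archiveFile(fs, rootDir,
            new Path(rootDir, "data/default/TestAcidGuarantees/region1/C/hfile1"));
      }
    }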
2024-12-10T15:39:11,105 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733845151104"}]},"ts":"9223372036854775807"} 2024-12-10T15:39:11,112 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-10T15:39:11,112 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => f84cee47e13bc12ff2ef81f5e007a839, NAME => 'TestAcidGuarantees,,1733845120158.f84cee47e13bc12ff2ef81f5e007a839.', STARTKEY => '', ENDKEY => ''}] 2024-12-10T15:39:11,112 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-10T15:39:11,113 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733845151112"}]},"ts":"9223372036854775807"} 2024-12-10T15:39:11,114 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-10T15:39:11,124 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=181, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-10T15:39:11,128 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 77 msec 2024-12-10T15:39:11,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33139 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-10T15:39:11,150 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 181 completed 2024-12-10T15:39:11,163 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=243 (was 245), OpenFileDescriptor=453 (was 446) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=978 (was 1193), ProcessCount=9 (was 11), AvailableMemoryMB=4392 (was 2927) - AvailableMemoryMB LEAK? 
- 2024-12-10T15:39:11,164 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-10T15:39:11,164 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-10T15:39:11,164 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x76523d14 to 127.0.0.1:56346 2024-12-10T15:39:11,164 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:39:11,164 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-10T15:39:11,164 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1364650780, stopped=false 2024-12-10T15:39:11,164 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=bf0fec90ff6d,33139,1733844951772 2024-12-10T15:39:11,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T15:39:11,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x100109579e40001, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-10T15:39:11,175 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-10T15:39:11,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T15:39:11,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x100109579e40001, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T15:39:11,176 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T15:39:11,176 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46239-0x100109579e40001, quorum=127.0.0.1:56346, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-10T15:39:11,176 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:39:11,177 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'bf0fec90ff6d,46239,1733844953049' ***** 2024-12-10T15:39:11,177 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-10T15:39:11,177 INFO [RS:0;bf0fec90ff6d:46239 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-10T15:39:11,177 INFO [RS:0;bf0fec90ff6d:46239 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-10T15:39:11,177 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-10T15:39:11,178 INFO [RS:0;bf0fec90ff6d:46239 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
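The entries just above show the client side of this run finishing: DISABLE (procId 177) and DELETE (procId 181) of default:TestAcidGuarantees complete, then HBaseTestingUtility shuts the minicluster down, which is what triggers the region server STOP messages here. A hedged sketch of that client-side sequence, assuming a standard HBase 2.x client and test utility on the classpath (this is not the actual TestAcidGuaranteesWithAdaptivePolicy code):

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class LifecycleSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        util.startMiniCluster();                 // one master + one region server, as in this run
        TableName name = TableName.valueOf("TestAcidGuarantees");
        try (Admin admin = util.getAdmin()) {
          // ... the test body would create and exercise the table here ...
          if (admin.tableExists(name)) {
            admin.disableTable(name);            // DisableTableProcedure (pid=177 above)
            admin.deleteTable(name);             // DeleteTableProcedure (pid=181 above)
          }
        }
        util.shutdownMiniCluster();              // produces the shutdown log that follows
      }
    }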
2024-12-10T15:39:11,178 INFO [RS:0;bf0fec90ff6d:46239 {}] regionserver.HRegionServer(3579): Received CLOSE for be7c8615eacea7669b98cff7543a195b 2024-12-10T15:39:11,178 INFO [RS:0;bf0fec90ff6d:46239 {}] regionserver.HRegionServer(1224): stopping server bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:11,178 DEBUG [RS:0;bf0fec90ff6d:46239 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:39:11,178 INFO [RS:0;bf0fec90ff6d:46239 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-10T15:39:11,178 INFO [RS:0;bf0fec90ff6d:46239 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-10T15:39:11,178 INFO [RS:0;bf0fec90ff6d:46239 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-10T15:39:11,178 INFO [RS:0;bf0fec90ff6d:46239 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-10T15:39:11,178 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing be7c8615eacea7669b98cff7543a195b, disabling compactions & flushes 2024-12-10T15:39:11,179 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733844957132.be7c8615eacea7669b98cff7543a195b. 2024-12-10T15:39:11,179 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733844957132.be7c8615eacea7669b98cff7543a195b. 2024-12-10T15:39:11,179 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733844957132.be7c8615eacea7669b98cff7543a195b. after waiting 0 ms 2024-12-10T15:39:11,179 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733844957132.be7c8615eacea7669b98cff7543a195b. 
2024-12-10T15:39:11,179 INFO [RS:0;bf0fec90ff6d:46239 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-10T15:39:11,179 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing be7c8615eacea7669b98cff7543a195b 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-10T15:39:11,179 DEBUG [RS:0;bf0fec90ff6d:46239 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, be7c8615eacea7669b98cff7543a195b=hbase:namespace,,1733844957132.be7c8615eacea7669b98cff7543a195b.} 2024-12-10T15:39:11,181 DEBUG [RS_CLOSE_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-10T15:39:11,181 INFO [RS_CLOSE_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-10T15:39:11,181 DEBUG [RS_CLOSE_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-10T15:39:11,181 DEBUG [RS_CLOSE_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-10T15:39:11,181 DEBUG [RS_CLOSE_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-10T15:39:11,181 INFO [RS_CLOSE_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-12-10T15:39:11,182 DEBUG [RS:0;bf0fec90ff6d:46239 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, be7c8615eacea7669b98cff7543a195b 2024-12-10T15:39:11,199 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/hbase/namespace/be7c8615eacea7669b98cff7543a195b/.tmp/info/cbd13e0f72714161b04bfa638ec93879 is 45, key is default/info:d/1733844958239/Put/seqid=0 2024-12-10T15:39:11,207 DEBUG [RS_CLOSE_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/hbase/meta/1588230740/.tmp/info/b2ee866b2de84fad85f551f3e6109363 is 143, key is hbase:namespace,,1733844957132.be7c8615eacea7669b98cff7543a195b./info:regioninfo/1733844958113/Put/seqid=0 2024-12-10T15:39:11,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742567_1743 (size=5037) 2024-12-10T15:39:11,218 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/hbase/namespace/be7c8615eacea7669b98cff7543a195b/.tmp/info/cbd13e0f72714161b04bfa638ec93879 2024-12-10T15:39:11,223 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/hbase/namespace/be7c8615eacea7669b98cff7543a195b/.tmp/info/cbd13e0f72714161b04bfa638ec93879 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/hbase/namespace/be7c8615eacea7669b98cff7543a195b/info/cbd13e0f72714161b04bfa638ec93879 2024-12-10T15:39:11,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742568_1744 (size=7725) 2024-12-10T15:39:11,224 INFO [RS_CLOSE_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/hbase/meta/1588230740/.tmp/info/b2ee866b2de84fad85f551f3e6109363 2024-12-10T15:39:11,227 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/hbase/namespace/be7c8615eacea7669b98cff7543a195b/info/cbd13e0f72714161b04bfa638ec93879, entries=2, sequenceid=6, filesize=4.9 K 2024-12-10T15:39:11,231 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for be7c8615eacea7669b98cff7543a195b in 52ms, sequenceid=6, compaction requested=false 2024-12-10T15:39:11,245 DEBUG [RS_CLOSE_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/hbase/meta/1588230740/.tmp/rep_barrier/70c8cfadce0d4fe0af6ffa3525cb3490 is 102, key is TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a./rep_barrier:/1733844984115/DeleteFamily/seqid=0 2024-12-10T15:39:11,247 INFO [regionserver/bf0fec90ff6d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T15:39:11,247 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/hbase/namespace/be7c8615eacea7669b98cff7543a195b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-10T15:39:11,248 INFO [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733844957132.be7c8615eacea7669b98cff7543a195b. 2024-12-10T15:39:11,248 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for be7c8615eacea7669b98cff7543a195b: 2024-12-10T15:39:11,248 DEBUG [RS_CLOSE_REGION-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733844957132.be7c8615eacea7669b98cff7543a195b. 
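The namespace flush above follows a write-then-publish pattern: the flusher writes the new HFile under the column family's .tmp directory (.../.tmp/info/cbd13e0f...), and HRegionFileSystem then "commits" it by renaming it into the live store directory (.../info/cbd13e0f...). A minimal sketch of that pattern with the plain Hadoop FileSystem API, using illustrative paths and payload (not the HRegionFileSystem code itself):

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpCommitSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path store = new Path("/demo/data/hbase/namespace/region1/info");
        FileSystem fs = store.getFileSystem(conf);

        // 1. Write the new file somewhere readers never look.
        Path tmp = new Path(store.getParent(), ".tmp/info/newfile");
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("flushed cells would go here".getBytes(StandardCharsets.UTF_8));
        }

        // 2. Publish with a single rename: readers see either the old set of
        //    store files or the old set plus the complete new file, never a partial one.
        Path published = new Path(store, tmp.getName());
        if (!fs.rename(tmp, published)) {
          throw new java.io.IOException("Commit failed for " + tmp);
        }
      }
    }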
2024-12-10T15:39:11,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742569_1745 (size=6025) 2024-12-10T15:39:11,253 INFO [RS_CLOSE_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/hbase/meta/1588230740/.tmp/rep_barrier/70c8cfadce0d4fe0af6ffa3525cb3490 2024-12-10T15:39:11,272 DEBUG [RS_CLOSE_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/hbase/meta/1588230740/.tmp/table/ed5f6a61c66b4bdd8d4ed336103270d6 is 96, key is TestAcidGuarantees,,1733844958846.8a8b39bcfc5042b2f61256808771f62a./table:/1733844984115/DeleteFamily/seqid=0 2024-12-10T15:39:11,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742570_1746 (size=5942) 2024-12-10T15:39:11,382 DEBUG [RS:0;bf0fec90ff6d:46239 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-10T15:39:11,582 DEBUG [RS:0;bf0fec90ff6d:46239 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-10T15:39:11,675 INFO [RS_CLOSE_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/hbase/meta/1588230740/.tmp/table/ed5f6a61c66b4bdd8d4ed336103270d6 2024-12-10T15:39:11,678 DEBUG [RS_CLOSE_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/hbase/meta/1588230740/.tmp/info/b2ee866b2de84fad85f551f3e6109363 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/hbase/meta/1588230740/info/b2ee866b2de84fad85f551f3e6109363 2024-12-10T15:39:11,681 INFO [RS_CLOSE_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/hbase/meta/1588230740/info/b2ee866b2de84fad85f551f3e6109363, entries=22, sequenceid=93, filesize=7.5 K 2024-12-10T15:39:11,682 DEBUG [RS_CLOSE_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/hbase/meta/1588230740/.tmp/rep_barrier/70c8cfadce0d4fe0af6ffa3525cb3490 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/hbase/meta/1588230740/rep_barrier/70c8cfadce0d4fe0af6ffa3525cb3490 2024-12-10T15:39:11,685 INFO [RS_CLOSE_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/hbase/meta/1588230740/rep_barrier/70c8cfadce0d4fe0af6ffa3525cb3490, entries=6, sequenceid=93, filesize=5.9 K 2024-12-10T15:39:11,686 DEBUG [RS_CLOSE_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/hbase/meta/1588230740/.tmp/table/ed5f6a61c66b4bdd8d4ed336103270d6 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/hbase/meta/1588230740/table/ed5f6a61c66b4bdd8d4ed336103270d6 2024-12-10T15:39:11,689 INFO [RS_CLOSE_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/hbase/meta/1588230740/table/ed5f6a61c66b4bdd8d4ed336103270d6, entries=9, sequenceid=93, filesize=5.8 K 2024-12-10T15:39:11,690 INFO [RS_CLOSE_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 509ms, sequenceid=93, compaction requested=false 2024-12-10T15:39:11,701 DEBUG [RS_CLOSE_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-12-10T15:39:11,701 DEBUG [RS_CLOSE_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-10T15:39:11,702 INFO [RS_CLOSE_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-10T15:39:11,702 DEBUG [RS_CLOSE_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-10T15:39:11,702 DEBUG [RS_CLOSE_META-regionserver/bf0fec90ff6d:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-10T15:39:11,782 INFO [RS:0;bf0fec90ff6d:46239 {}] regionserver.HRegionServer(1250): stopping server bf0fec90ff6d,46239,1733844953049; all regions closed. 
2024-12-10T15:39:11,799 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(743): complete file /user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/WALs/bf0fec90ff6d,46239,1733844953049/bf0fec90ff6d%2C46239%2C1733844953049.meta.1733844956678.meta not finished, retry = 0 2024-12-10T15:39:11,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741834_1010 (size=26050) 2024-12-10T15:39:11,806 INFO [regionserver/bf0fec90ff6d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-10T15:39:11,806 INFO [regionserver/bf0fec90ff6d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-10T15:39:11,901 DEBUG [RS:0;bf0fec90ff6d:46239 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/oldWALs 2024-12-10T15:39:11,901 INFO [RS:0;bf0fec90ff6d:46239 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL bf0fec90ff6d%2C46239%2C1733844953049.meta:.meta(num 1733844956678) 2024-12-10T15:39:11,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741833_1009 (size=17031581) 2024-12-10T15:39:11,907 DEBUG [RS:0;bf0fec90ff6d:46239 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/oldWALs 2024-12-10T15:39:11,907 INFO [RS:0;bf0fec90ff6d:46239 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL bf0fec90ff6d%2C46239%2C1733844953049:(num 1733844955894) 2024-12-10T15:39:11,907 DEBUG [RS:0;bf0fec90ff6d:46239 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:39:11,908 INFO [RS:0;bf0fec90ff6d:46239 {}] regionserver.LeaseManager(133): Closed leases 2024-12-10T15:39:11,908 INFO [RS:0;bf0fec90ff6d:46239 {}] hbase.ChoreService(370): Chore service for: regionserver/bf0fec90ff6d:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-10T15:39:11,908 INFO [regionserver/bf0fec90ff6d:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-10T15:39:11,908 INFO [RS:0;bf0fec90ff6d:46239 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:46239 2024-12-10T15:39:11,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x100109579e40001, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/bf0fec90ff6d,46239,1733844953049 2024-12-10T15:39:11,962 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-10T15:39:11,963 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [bf0fec90ff6d,46239,1733844953049] 2024-12-10T15:39:11,963 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing bf0fec90ff6d,46239,1733844953049; numProcessing=1 2024-12-10T15:39:11,979 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/bf0fec90ff6d,46239,1733844953049 already deleted, retry=false 2024-12-10T15:39:11,979 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; bf0fec90ff6d,46239,1733844953049 expired; onlineServers=0 2024-12-10T15:39:11,979 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'bf0fec90ff6d,33139,1733844951772' ***** 2024-12-10T15:39:11,979 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-10T15:39:11,979 DEBUG [M:0;bf0fec90ff6d:33139 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37b1f91a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=bf0fec90ff6d/172.17.0.2:0 2024-12-10T15:39:11,979 INFO [M:0;bf0fec90ff6d:33139 {}] regionserver.HRegionServer(1224): stopping server bf0fec90ff6d,33139,1733844951772 2024-12-10T15:39:11,979 INFO [M:0;bf0fec90ff6d:33139 {}] regionserver.HRegionServer(1250): stopping server bf0fec90ff6d,33139,1733844951772; all regions closed. 
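The expiration handling above is ZooKeeper-driven: the region server's znode under /hbase/rs is ephemeral, so closing its ZooKeeper session deletes the node, the master's RegionServerTracker receives the NodeChildrenChanged/NodeDeleted events, and it processes the server as expired. A small sketch of that mechanism with the plain ZooKeeper client, assuming an illustrative quorum address and that the /hbase/rs parent znode already exists (this is not HBase's ZKWatcher code):

    import java.nio.charset.StandardCharsets;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class EphemeralTrackerSketch {
      public static void main(String[] args) throws Exception {
        Watcher watcher = (WatchedEvent event) ->
            System.out.println("event " + event.getType() + " on " + event.getPath());

        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30000, watcher);

        // Watch the parent so membership changes are reported (NodeChildrenChanged);
        // in HBase the master's session holds this watch, not the region server's.
        zk.getChildren("/hbase/rs", true);

        // An ephemeral child vanishes automatically when this session closes,
        // which is how the master learns the region server is gone.
        zk.create("/hbase/rs/host,46239,1733844953049",
            "state".getBytes(StandardCharsets.UTF_8),
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

        zk.close();  // session gone -> ephemeral node deleted -> watcher on the master fires
      }
    }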
2024-12-10T15:39:11,979 DEBUG [M:0;bf0fec90ff6d:33139 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-10T15:39:11,979 DEBUG [M:0;bf0fec90ff6d:33139 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-10T15:39:11,980 DEBUG [M:0;bf0fec90ff6d:33139 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-10T15:39:11,980 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster-HFileCleaner.large.0-1733844955561 {}] cleaner.HFileCleaner(306): Exit Thread[master/bf0fec90ff6d:0:becomeActiveMaster-HFileCleaner.large.0-1733844955561,5,FailOnTimeoutGroup] 2024-12-10T15:39:11,980 DEBUG [master/bf0fec90ff6d:0:becomeActiveMaster-HFileCleaner.small.0-1733844955564 {}] cleaner.HFileCleaner(306): Exit Thread[master/bf0fec90ff6d:0:becomeActiveMaster-HFileCleaner.small.0-1733844955564,5,FailOnTimeoutGroup] 2024-12-10T15:39:11,980 INFO [M:0;bf0fec90ff6d:33139 {}] hbase.ChoreService(370): Chore service for: master/bf0fec90ff6d:0 had [] on shutdown 2024-12-10T15:39:11,980 DEBUG [M:0;bf0fec90ff6d:33139 {}] master.HMaster(1733): Stopping service threads 2024-12-10T15:39:11,980 INFO [M:0;bf0fec90ff6d:33139 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-10T15:39:11,980 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-10T15:39:11,980 ERROR [M:0;bf0fec90ff6d:33139 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[IPC Client (59733779) connection to localhost/127.0.0.1:41507 from jenkins,5,PEWorkerGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:41507,5,PEWorkerGroup] 2024-12-10T15:39:11,981 INFO [M:0;bf0fec90ff6d:33139 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-10T15:39:11,981 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-10T15:39:11,987 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-10T15:39:11,987 DEBUG [M:0;bf0fec90ff6d:33139 {}] zookeeper.ZKUtil(347): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-10T15:39:11,987 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-10T15:39:11,987 WARN [M:0;bf0fec90ff6d:33139 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-10T15:39:11,987 INFO [M:0;bf0fec90ff6d:33139 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-10T15:39:11,988 INFO [M:0;bf0fec90ff6d:33139 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-10T15:39:11,988 DEBUG [M:0;bf0fec90ff6d:33139 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-10T15:39:11,988 INFO [M:0;bf0fec90ff6d:33139 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T15:39:11,988 DEBUG [M:0;bf0fec90ff6d:33139 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-10T15:39:11,988 DEBUG [M:0;bf0fec90ff6d:33139 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-10T15:39:11,988 DEBUG [M:0;bf0fec90ff6d:33139 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-10T15:39:11,988 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-10T15:39:11,988 INFO [M:0;bf0fec90ff6d:33139 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=789.46 KB heapSize=972.09 KB
2024-12-10T15:39:12,005 DEBUG [M:0;bf0fec90ff6d:33139 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4b862f1a64e2449199b2600bbc9f7722 is 82, key is hbase:meta,,1/info:regioninfo/1733844956875/Put/seqid=0
2024-12-10T15:39:12,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742571_1747 (size=5672)
2024-12-10T15:39:12,047 INFO [M:0;bf0fec90ff6d:33139 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2254 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4b862f1a64e2449199b2600bbc9f7722
2024-12-10T15:39:12,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x100109579e40001, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-10T15:39:12,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46239-0x100109579e40001, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-10T15:39:12,071 INFO [RS:0;bf0fec90ff6d:46239 {}] regionserver.HRegionServer(1307): Exiting; stopping=bf0fec90ff6d,46239,1733844953049; zookeeper connection closed.
2024-12-10T15:39:12,071 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@92e361c {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@92e361c
2024-12-10T15:39:12,071 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-10T15:39:12,072 DEBUG [M:0;bf0fec90ff6d:33139 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0504b9d64f164014b6aadcd851e59ef6 is 2285, key is \x00\x00\x00\x00\x00\x00\x00\x96/proc:d/1733845123216/Put/seqid=0
2024-12-10T15:39:12,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742572_1748 (size=45478)
2024-12-10T15:39:12,089 INFO [M:0;bf0fec90ff6d:33139 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=788.91 KB at sequenceid=2254 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0504b9d64f164014b6aadcd851e59ef6
2024-12-10T15:39:12,098 INFO [M:0;bf0fec90ff6d:33139 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0504b9d64f164014b6aadcd851e59ef6
2024-12-10T15:39:12,131 DEBUG [M:0;bf0fec90ff6d:33139 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0ea53dda75674722bffef0f7aa99ff78 is 69, key is bf0fec90ff6d,46239,1733844953049/rs:state/1733844955659/Put/seqid=0
2024-12-10T15:39:12,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073742573_1749 (size=5156)
2024-12-10T15:39:12,416 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-10T15:39:12,416 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-12-10T15:39:12,416 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace
2024-12-10T15:39:12,416 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees
2024-12-10T15:39:12,543 INFO [M:0;bf0fec90ff6d:33139 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2254 (bloomFilter=true), to=hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0ea53dda75674722bffef0f7aa99ff78
2024-12-10T15:39:12,546 DEBUG [M:0;bf0fec90ff6d:33139 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/4b862f1a64e2449199b2600bbc9f7722 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4b862f1a64e2449199b2600bbc9f7722
2024-12-10T15:39:12,549 INFO [M:0;bf0fec90ff6d:33139 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/4b862f1a64e2449199b2600bbc9f7722, entries=8, sequenceid=2254, filesize=5.5 K
2024-12-10T15:39:12,550 DEBUG [M:0;bf0fec90ff6d:33139 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0504b9d64f164014b6aadcd851e59ef6 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0504b9d64f164014b6aadcd851e59ef6
2024-12-10T15:39:12,553 INFO [M:0;bf0fec90ff6d:33139 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0504b9d64f164014b6aadcd851e59ef6
2024-12-10T15:39:12,553 INFO [M:0;bf0fec90ff6d:33139 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0504b9d64f164014b6aadcd851e59ef6, entries=181, sequenceid=2254, filesize=44.4 K
2024-12-10T15:39:12,554 DEBUG [M:0;bf0fec90ff6d:33139 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0ea53dda75674722bffef0f7aa99ff78 as hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0ea53dda75674722bffef0f7aa99ff78
2024-12-10T15:39:12,556 INFO [M:0;bf0fec90ff6d:33139 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41507/user/jenkins/test-data/1d646439-9d69-31f0-221a-3c4af71cd935/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0ea53dda75674722bffef0f7aa99ff78, entries=1, sequenceid=2254, filesize=5.0 K
2024-12-10T15:39:12,557 INFO [M:0;bf0fec90ff6d:33139 {}] regionserver.HRegion(3040): Finished flush of dataSize ~789.46 KB/808410, heapSize ~971.80 KB/995120, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 569ms, sequenceid=2254, compaction requested=false
2024-12-10T15:39:12,567 INFO [M:0;bf0fec90ff6d:33139 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-10T15:39:12,567 DEBUG [M:0;bf0fec90ff6d:33139 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-10T15:39:12,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46053 is added to blk_1073741830_1006 (size=956167)
2024-12-10T15:39:12,576 INFO [M:0;bf0fec90ff6d:33139 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-12-10T15:39:12,576 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-10T15:39:12,576 INFO [M:0;bf0fec90ff6d:33139 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:33139
2024-12-10T15:39:12,620 DEBUG [M:0;bf0fec90ff6d:33139 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/bf0fec90ff6d,33139,1733844951772 already deleted, retry=false
2024-12-10T15:39:12,765 INFO [M:0;bf0fec90ff6d:33139 {}] regionserver.HRegionServer(1307): Exiting; stopping=bf0fec90ff6d,33139,1733844951772; zookeeper connection closed.
2024-12-10T15:39:12,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-10T15:39:12,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33139-0x100109579e40000, quorum=127.0.0.1:56346, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-10T15:39:12,776 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f79ec76{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-10T15:39:12,778 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-10T15:39:12,779 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-10T15:39:12,779 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-10T15:39:12,779 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/hadoop.log.dir/,STOPPED}
2024-12-10T15:39:12,782 WARN [BP-2139375084-172.17.0.2-1733844946170 heartbeating to localhost/127.0.0.1:41507 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-10T15:39:12,782 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-10T15:39:12,782 WARN [BP-2139375084-172.17.0.2-1733844946170 heartbeating to localhost/127.0.0.1:41507 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2139375084-172.17.0.2-1733844946170 (Datanode Uuid 7b36a362-3475-4661-8cbb-c0345cf63144) service to localhost/127.0.0.1:41507
2024-12-10T15:39:12,782 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-10T15:39:12,784 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/cluster_17d56c0b-d68d-6c1b-ce5b-c524a0c95074/dfs/data/data1/current/BP-2139375084-172.17.0.2-1733844946170 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T15:39:12,793 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/cluster_17d56c0b-d68d-6c1b-ce5b-c524a0c95074/dfs/data/data2/current/BP-2139375084-172.17.0.2-1733844946170 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-10T15:39:12,794 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-10T15:39:12,801 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-10T15:39:12,802 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-10T15:39:12,802 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-10T15:39:12,802 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-10T15:39:12,802 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6943fa3-b0df-716c-0c50-9e524ccfacc0/hadoop.log.dir/,STOPPED}
2024-12-10T15:39:12,835 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-12-10T15:39:13,027 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down